import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
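# The two cache checks above pin down the KV-cache invariant that incremental
# decoding relies on: one forward pass over the full sequence must agree with
# feeding only the last token plus past_key_values. Sketch of the idea (names
# are illustrative, not part of the test suite):
#
#   full_logits = model(input_ids)[0]
#   step_logits = model(input_ids[:, -1:], past_key_values=cache, position_ids=last_pos)[0]
#   # the logits at the final position agree to within ~1e-3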
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
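# Illustrative only (not part of the original module): the ONNX config maps each
# input name to its dynamic axes, e.g. for the default task:
#
#   config = CamembertConfig()
#   onnx_config = CamembertOnnxConfig(config, task="default")
#   print(onnx_config.inputs)
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#   #              ('attention_mask', {0: 'batch', 1: 'sequence'})])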
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
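# Example invocation (the script name and all paths are placeholders for your
# own files):
#   python convert_ldm_original.py \
#       --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm-pipeline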
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
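# Usage sketch (assumes a published unconditional LDM checkpoint such as
# "CompVis/ldm-celebahq-256"; any checkpoint with a VQ-VAE, UNet and DDIM
# scheduler loads the same way):
#
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#   image.save("sample.png")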
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
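# Usage sketch: callers pass a package name from the deps table, optionally with
# a hint that is shown when the installed version does not satisfy the pin:
#   dep_version_check("tqdm")
#   dep_version_check("numpy", "try: pip install -U numpy")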
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
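# Non-interactive example (illustrative): a 3-vertex graph where the shortest
# path from 0 to 2 goes through 1, so dist[0][2] ends up as 3 + 1 = 4.
#
#   INF = float("inf")
#   g = [[0.0, 3.0, INF],
#        [INF, 0.0, 1.0],
#        [INF, INF, 0.0]]
#   dist, _ = floyd_warshall(g, 3)  # dist[0][2] == 4.0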
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
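# Quick sanity checks for the 6k +/- 1 primality test and the nth-prime search:
#   is_prime(13)  -> True
#   is_prime(15)  -> False (3 * 5)
#   solution(6)   -> 13, the sixth prime (2, 3, 5, 7, 11, 13)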
if __name__ == "__main__":
print(f"""{solution() = }""")
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
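# Usage sketch: iterating the wrapper yields the sum of all node values.
#
#   tree = Node(10)
#   tree.left = Node(5)
#   tree.right = Node(-3)
#   print(next(iter(BinaryTreeNodeSum(tree))))  # 10 + 5 - 3 == 12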
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    # special case for the ForPreTraining model, which also needs a next-sentence label
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict

    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
from jiwer import compute_measures
import datasets
_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
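# Worked example of the iterative branch, matching the docstring above: for the
# pair ("this is the prediction", "this is the reference") jiwer counts
# 1 substitution, 0 deletions, 0 insertions and 3 hits (4 reference words); the
# second pair contributes 2 substitutions, 1 insertion and 2 hits, so
# incorrect / total = (1 + 3) / (4 + 4) = 0.5.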
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
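# The pipeline under test exposes k-diffusion samplers by name; a condensed
# sketch of the pattern exercised above:
#
#   pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
#   pipe.set_scheduler("sample_dpmpp_2m")  # any sampler name known to k-diffusion
#   image = pipe("A painting of a squirrel eating a burger", use_karras_sigmas=True).images[0]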
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
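# Illustrative check of the sliding-window arithmetic above: a 4x4 input with
# size=2 and stride=2 yields a 2x2 output holding each block's maximum.
#
#   maxpooling([[1, 2, 3, 4],
#               [5, 6, 7, 8],
#               [9, 10, 11, 12],
#               [13, 14, 15, 16]], size=2, stride=2)
#   # -> [[ 6.,  8.],
#   #     [14., 16.]]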
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
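# Same input as the maxpooling example; each block's mean is truncated to an
# int by the int(np.average(...)) call above.
#
#   avgpooling([[1, 2, 3, 4],
#               [5, 6, 7, 8],
#               [9, 10, 11, 12],
#               [13, 14, 15, 16]], size=2, stride=2)
#   # -> [[ 3.,  5.],
#   #     [11., 13.]]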
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )

        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
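# Usage sketch (fixture and attribute names follow the reconstruction above):
# a test class requests the fixture and reads the prepared environment.
#
#   @pytest.mark.usefixtures("sm_env")
#   class TestSingleNode:
#       framework = "pytorch"
#
#       def test_hyperparameters(self):
#           assert self.env.hyperparameters["task_name"] == "mnli"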
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
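# Sanity-check sketch (hypothetical usage, not part of the conversion script): every
# original key should appear exactly once on the left-hand side of the mapping above.
# assert len({src for src, _ in create_rename_keys(config)}) == len(create_rename_keys(config))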
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
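# Illustration of the slicing used in read_in_q_k_v above: torch.nn.MultiheadAttention
# stores the fused query/key/value projection as a single (3 * 256, 256) matrix, and
# the three 256-row blocks are split back into q/k/v. Dummy tensor, demonstration only:
# fused = torch.randn(3 * 256, 256)
# q, k, v = fused[:256, :], fused[256:512, :], fused[-256:, :]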
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
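# Example invocation (the script filename and output path are placeholders):
#   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./converted-detr-resnet-50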
| 701 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Mark the function with the key code so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Mark the function with the key codes so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the handler for the pressed character, if one exists."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
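# A minimal usage sketch (the class below is hypothetical): `register` rebuilds the
# class through the KeyHandler metaclass, which collects every method tagged with
# `mark` into `key_handler` and exposes `handle_input` for dispatch.
#
# @register
# class Menu:
#     @mark(KEYMAP["up"])
#     def on_up(cls):
#         ...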
| 269 | 0 |
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
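# Worked example of the cutoff bookkeeping above (values hypothetical): with
# vocab_size=10000 and cutoffs=[500, 2000], self.cutoffs becomes [500, 2000, 10000],
# giving clusters [0, 500), [500, 2000) and [2000, 10000); the head softmax covers
# the 500-token shortlist plus 2 cluster logits, so head_size = 502.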
| 101 |
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))
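# Quick sanity check (values rounded): the sigmoid is 0.5 at 0 and symmetric,
# i.e. sigmoid(-x) = 1 - sigmoid(x).
# >>> sigmoid(np.array([-1.0, 0.0, 1.0]))
# array([0.26894142, 0.5       , 0.73105858])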
if __name__ == "__main__":
import doctest
doctest.testmod()
| 551 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def snake_case__ ( self ):
return len(self.sp_model )
def snake_case__ ( self ):
_lowerCamelCase = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCamelCase = self.__dict__.copy()
_lowerCamelCase = None
return state
def __setstate__( self , lowerCamelCase__ ):
_lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase = {}
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__ ( self , lowerCamelCase__ ):
if self.remove_space:
_lowerCamelCase = ''' '''.join(inputs.strip().split() )
else:
_lowerCamelCase = inputs
_lowerCamelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
_lowerCamelCase = unicodedata.normalize('''NFKD''' , lowerCamelCase__ )
_lowerCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase__ )] )
if self.do_lower_case:
_lowerCamelCase = outputs.lower()
return outputs
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.preprocess_text(lowerCamelCase__ )
_lowerCamelCase = self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
_lowerCamelCase = []
for piece in pieces:
if len(lowerCamelCase__ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
_lowerCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase__ , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCamelCase = cur_pieces[1:]
else:
_lowerCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCamelCase__ )
else:
new_pieces.append(lowerCamelCase__ )
return new_pieces
def snake_case__ ( self , lowerCamelCase__ ):
return self.sp_model.PieceToId(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
return self.sp_model.IdToPiece(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = ''''''.join(lowerCamelCase__ ).replace(lowerCamelCase__ , ''' ''' ).strip()
return out_string
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is not None:
return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1, 1]
return ([0] * len(lowerCamelCase__ )) + [1, 1]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__ , '''wb''' ) as fi:
_lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
def snake_case__ ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
_lowerCamelCase = super()._decode(*lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
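    # A minimal sketch of the CPM-specific round trip: on the way in, the translator
    # maps " " -> "\u2582" and "\n" -> "\u2583" before SentencePiece sees the text,
    # and `_decode` above reverses the substitution after decoding.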
| 700 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
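    # Each iteration replaces every segment with 4 shorter ones, so the initial
    # 3 segments become 3 * 4**n; for the 5 iterations above that is 3072 segments.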
| 623 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 466 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """A graph defined by a set of vertices and a weighted-edge mapping."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")
        adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 661 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 641 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs
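    # A minimal usage sketch (checkpoint name is a real public checkpoint, but the
    # surrounding setup is hypothetical): with apply_ocr enabled in the image
    # processor, only images are required and words/boxes come from OCR.
    #
    # processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
    # encoding = processor(images=image, return_tensors="pt")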
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )
        return images_with_overflow
return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
return self.image_processor | 641 | 1 |
'''simple docstring'''
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main() | 436 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
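    # Usage sketch for the weight initialization above (shapes follow the config
    # defaults; the snippet itself is illustrative, not part of the module): dummy
    # sample, timestep, text-embedding and conditioning tensors are built and passed
    # through `self.init` to materialize the parameter PyTree.
    #
    # controlnet = FlaxControlNetModel()
    # params = controlnet.init_weights(jax.random.PRNGKey(0))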
def lowerCAmelCase__ ( self: List[str] ) -> Union[str, Any]:
__magic_name__ : Union[str, Any] = self.block_out_channels
__magic_name__ : Tuple = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__magic_name__ : Optional[Any] = self.num_attention_heads or self.attention_head_dim
# input
__magic_name__ : Dict = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__magic_name__ : List[str] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__magic_name__ : Dict = FlaxTimestepEmbedding(__UpperCamelCase , dtype=self.dtype )
__magic_name__ : Tuple = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
__magic_name__ : Union[str, Any] = self.only_cross_attention
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__magic_name__ : Tuple = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__magic_name__ : Any = (num_attention_heads,) * len(self.down_block_types )
# down
__magic_name__ : Optional[int] = []
__magic_name__ : Union[str, Any] = []
__magic_name__ : Optional[int] = block_out_channels[0]
__magic_name__ : Tuple = nn.Conv(
__UpperCamelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__UpperCamelCase )
for i, down_block_type in enumerate(self.down_block_types ):
__magic_name__ : Optional[Any] = output_channel
__magic_name__ : List[Any] = block_out_channels[i]
__magic_name__ : int = i == len(__UpperCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__magic_name__ : List[str] = FlaxCrossAttnDownBlockaD(
in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
__magic_name__ : Optional[int] = FlaxDownBlockaD(
in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__UpperCamelCase )
for _ in range(self.layers_per_block ):
__magic_name__ : Any = nn.Conv(
__UpperCamelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__UpperCamelCase )
if not is_final_block:
__magic_name__ : Optional[int] = nn.Conv(
__UpperCamelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__UpperCamelCase )
__magic_name__ : str = down_blocks
__magic_name__ : List[str] = controlnet_down_blocks
# mid
__magic_name__ : Optional[Any] = block_out_channels[-1]
__magic_name__ : List[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=__UpperCamelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
__magic_name__ : List[Any] = nn.Conv(
__UpperCamelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self: Optional[int] , __UpperCamelCase: Any , __UpperCamelCase: Dict , __UpperCamelCase: Optional[Any] , __UpperCamelCase: List[str] , __UpperCamelCase: float = 1.0 , __UpperCamelCase: bool = True , __UpperCamelCase: bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
__magic_name__ : List[str] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__magic_name__ : Dict = jnp.flip(__UpperCamelCase , axis=1 )
# 1. time
if not isinstance(__UpperCamelCase , jnp.ndarray ):
__magic_name__ : Union[str, Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__UpperCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
__magic_name__ : Union[str, Any] = timesteps.astype(dtype=jnp.floataa )
__magic_name__ : Optional[Any] = jnp.expand_dims(__UpperCamelCase , 0 )
__magic_name__ : Optional[int] = self.time_proj(__UpperCamelCase )
__magic_name__ : Any = self.time_embedding(__UpperCamelCase )
# 2. pre-process
__magic_name__ : Optional[Any] = jnp.transpose(__UpperCamelCase , (0, 2, 3, 1) )
__magic_name__ : List[Any] = self.conv_in(__UpperCamelCase )
__magic_name__ : List[Any] = jnp.transpose(__UpperCamelCase , (0, 2, 3, 1) )
__magic_name__ : Dict = self.controlnet_cond_embedding(__UpperCamelCase )
sample += controlnet_cond
# 3. down
__magic_name__ : Optional[Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__magic_name__ , __magic_name__ : Dict = down_block(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , deterministic=not train )
else:
__magic_name__ , __magic_name__ : Union[str, Any] = down_block(__UpperCamelCase , __UpperCamelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__magic_name__ : Optional[int] = self.mid_block(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , deterministic=not train )
# 5. contronet blocks
__magic_name__ : Optional[Any] = ()
for down_block_res_sample, controlnet_block in zip(__UpperCamelCase , self.controlnet_down_blocks ):
__magic_name__ : Any = controlnet_block(__UpperCamelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
__magic_name__ : int = controlnet_down_block_res_samples
__magic_name__ : List[str] = self.controlnet_mid_block(__UpperCamelCase )
# 6. scaling
__magic_name__ : Optional[Any] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__UpperCamelCase , mid_block_res_sample=__UpperCamelCase ) | 436 | 1 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_DESCRIPTION = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
_KWARGS_DESCRIPTION = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
CHECKPOINT_URLS = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )
    def _download_and_prepare(self, dl_manager):
"""simple docstring"""
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
"""Using default BLEURT-Base checkpoint for sequence maximum length 128. """
"""You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
_UpperCAmelCase = """bleurt-base-128"""
if self.config_name.lower() in CHECKPOINT_URLS:
_UpperCAmelCase = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
_UpperCAmelCase = self.config_name.upper()
else:
raise KeyError(
f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )
# download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
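        # Checkpoint selection sketch: datasets.load_metric("bleurt", "BLEURT-20")
        # resolves "BLEURT-20" against CHECKPOINT_URLS above and downloads that archive.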
    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
return {"scores": scores} | 402 |
from collections import deque
from math import floor
from random import random
from time import time
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Any ) -> int:
"""simple docstring"""
_UpperCAmelCase = {}
def lowerCamelCase ( self : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : Any=1 ) -> int:
"""simple docstring"""
if self.graph.get(lowerCamelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_UpperCAmelCase = [[w, v]]
if not self.graph.get(lowerCamelCase ):
_UpperCAmelCase = []
def lowerCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return list(self.graph )
def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str ) -> Optional[Any]:
"""simple docstring"""
if self.graph.get(lowerCamelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCamelCase )
def lowerCamelCase ( self : Tuple , lowerCamelCase : List[str]=-2 , lowerCamelCase : List[str]=-1 ) -> Any:
"""simple docstring"""
if s == d:
return []
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
_UpperCAmelCase = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCamelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCamelCase ) != 0:
_UpperCAmelCase = stack[len(lowerCamelCase ) - 1]
else:
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return visited
def lowerCamelCase ( self : Any , lowerCamelCase : Optional[int]=-1 ) -> int:
"""simple docstring"""
if c == -1:
_UpperCAmelCase = floor(random() * 1_0000 ) + 10
for i in range(lowerCamelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_UpperCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCamelCase , lowerCamelCase , 1 )
def lowerCamelCase ( self : Optional[int] , lowerCamelCase : str=-2 ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = deque()
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
d.append(lowerCamelCase )
visited.append(lowerCamelCase )
while d:
_UpperCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase ( self : Optional[int] , lowerCamelCase : Tuple ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowerCamelCase ( self : List[str] , lowerCamelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
return len(self.graph[u] )
def lowerCamelCase ( self : str , lowerCamelCase : Optional[int]=-2 ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
_UpperCAmelCase = s
_UpperCAmelCase = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCamelCase ) != 0:
_UpperCAmelCase = stack[len(lowerCamelCase ) - 1]
else:
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return sorted_nodes
def lowerCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(lowerCamelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(lowerCamelCase ) != 0:
_UpperCAmelCase = stack[len(lowerCamelCase ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(lowerCamelCase )
_UpperCAmelCase = s
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return list(lowerCamelCase )
def lowerCamelCase ( self : Dict ) -> str:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(lowerCamelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(lowerCamelCase ) != 0:
_UpperCAmelCase = stack[len(lowerCamelCase ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(lowerCamelCase )
_UpperCAmelCase = s
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return False
def lowerCamelCase ( self : Optional[Any] , lowerCamelCase : Any=-2 , lowerCamelCase : List[Any]=-1 ) -> Dict:
"""simple docstring"""
        begin = time()
        self.dfs(lowerCamelCase , lowerCamelCase )
        end = time()
        return end - begin
def lowerCamelCase ( self : str , lowerCamelCase : Optional[Any]=-2 ) -> Union[str, Any]:
"""simple docstring"""
        begin = time()
        self.bfs(lowerCamelCase )
        end = time()
        return end - begin
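# A compact reference for what the two stack-based methods above compute:
# whether the directed graph contains a cycle. This is a standalone sketch
# using the classic white/gray/black DFS colouring, not a method of the
# class; `has_cycle_ref` is a hypothetical name and the plain node->neighbours
# dict is a simplification of the [weight, node] pairs the class stores.
def has_cycle_ref(graph: dict) -> bool:
    WHITE, GRAY, BLACK = 0, 1, 2
    color = {v: WHITE for v in graph}
    def visit(v) -> bool:
        color[v] = GRAY  # v is on the current DFS path
        for w in graph.get(v, []):
            if color.get(w, WHITE) == GRAY:
                return True  # back edge -> cycle
            if color.get(w, WHITE) == WHITE and visit(w):
                return True
        color[v] = BLACK  # fully explored
        return False
    return any(color[v] == WHITE and visit(v) for v in list(graph))

assert has_cycle_ref({0: [1], 1: [2], 2: [0]}) is True
assert has_cycle_ref({0: [1], 1: [2], 2: []}) is False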
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
        self.graph = {}
def lowerCamelCase ( self : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : Optional[Any]=1 ) -> Tuple:
"""simple docstring"""
# check if the u exists
if self.graph.get(lowerCamelCase ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_UpperCAmelCase = [[w, v]]
# add the other way
if self.graph.get(lowerCamelCase ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
_UpperCAmelCase = [[w, u]]
def lowerCamelCase ( self : List[str] , lowerCamelCase : Dict , lowerCamelCase : int ) -> Dict:
"""simple docstring"""
if self.graph.get(lowerCamelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCamelCase )
# the other way round
if self.graph.get(lowerCamelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCamelCase )
def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : Union[str, Any]=-2 , lowerCamelCase : int=-1 ) -> Optional[Any]:
"""simple docstring"""
if s == d:
return []
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
_UpperCAmelCase = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCamelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCamelCase ) != 0:
_UpperCAmelCase = stack[len(lowerCamelCase ) - 1]
else:
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return visited
def lowerCamelCase ( self : List[Any] , lowerCamelCase : str=-1 ) -> List[str]:
"""simple docstring"""
if c == -1:
_UpperCAmelCase = floor(random() * 1_0000 ) + 10
for i in range(lowerCamelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_UpperCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCamelCase , lowerCamelCase , 1 )
def lowerCamelCase ( self : Any , lowerCamelCase : List[Any]=-2 ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = deque()
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
d.append(lowerCamelCase )
visited.append(lowerCamelCase )
while d:
_UpperCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase ( self : Any , lowerCamelCase : Any ) -> List[Any]:
"""simple docstring"""
return len(self.graph[u] )
def lowerCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(lowerCamelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(lowerCamelCase ) != 0:
_UpperCAmelCase = stack[len(lowerCamelCase ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(lowerCamelCase )
_UpperCAmelCase = s
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return list(lowerCamelCase )
def lowerCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(lowerCamelCase )
visited.append(lowerCamelCase )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(lowerCamelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(lowerCamelCase ) != 0:
_UpperCAmelCase = stack[len(lowerCamelCase ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(lowerCamelCase )
_UpperCAmelCase = s
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(lowerCamelCase ) == 0:
return False
def lowerCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
return list(self.graph )
def lowerCamelCase ( self : str , lowerCamelCase : str=-2 , lowerCamelCase : Optional[int]=-1 ) -> List[Any]:
"""simple docstring"""
        begin = time()
        self.dfs(lowerCamelCase , lowerCamelCase )
        end = time()
        return end - begin
def lowerCamelCase ( self : Any , lowerCamelCase : List[Any]=-2 ) -> Dict:
"""simple docstring"""
        begin = time()
        self.bfs(lowerCamelCase )
        end = time()
        return end - begin | 402 | 1 |
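# Minimal, self-contained version of the traversal pattern both classes
# implement: an adjacency list, an explicit-stack DFS and a queue-based BFS.
# The `_ref` names and the plain node->neighbours dict are illustrative
# simplifications, not part of the classes above.
from collections import deque

def dfs_ref(graph: dict, start) -> list:
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            # push unvisited neighbours so they are explored depth-first
            stack.extend(n for n in graph.get(node, []) if n not in visited)
    return visited

def bfs_ref(graph: dict, start) -> list:
    visited, queue = [start], deque([start])
    while queue:
        node = queue.popleft()
        for n in graph.get(node, []):
            if n not in visited:
                visited.append(n)
                queue.append(n)
    return visited

g = {0: [1, 2], 1: [3], 2: [3], 3: []}
print(dfs_ref(g, 0))  # [0, 2, 3, 1]
print(bfs_ref(g, 0))  # [0, 1, 2, 3]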
'''simple docstring'''
from __future__ import annotations
def encode( plain: str ) -> list[int]:
    '''simple docstring'''
    return [ord(elem) - 96 for elem in plain]
def decode( encoded: list[int] ) -> str:
    '''simple docstring'''
    return "".join(chr(elem + 96 ) for elem in encoded )
def main( ) -> None:
    '''simple docstring'''
    encoded = encode(input("""-> """ ).strip().lower() )
    print("""Encoded: """ , encoded )
    print("""Decoded:""" , decode(encoded ) )
if __name__ == "__main__":
main()
| 601 |
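# Quick check of the a1z26 mapping above ('a' -> 1 ... 'z' -> 26), runnable
# without the interactive main():
print(encode("abc"))   # [1, 2, 3]
print(decode([8, 9]))  # hi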
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest( unittest.TestCase ):
    def check_results_dict_not_empty( self , results ) -> None:
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
                result = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(result )
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = """sshleifer/tiny-gpt2"""
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__UpperCamelCase = PyTorchBenchmark(lowercase )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = """sgugger/tiny-distilbert-classification"""
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , only_pretrain_model=lowercase , )
__UpperCamelCase = PyTorchBenchmark(lowercase )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = """sshleifer/tiny-gpt2"""
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , torchscript=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__UpperCamelCase = PyTorchBenchmark(lowercase )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = """sshleifer/tiny-gpt2"""
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , fpaa=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__UpperCamelCase = PyTorchBenchmark(lowercase )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = """sshleifer/tiny-gpt2"""
        config = AutoConfig.from_pretrained(MODEL_ID )
# set architectures equal to `None`
        config.architectures = None
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__UpperCamelCase = PyTorchBenchmark(lowercase , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase = """sshleifer/tiny-gpt2"""
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__UpperCamelCase = PyTorchBenchmark(lowercase )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = """sshleifer/tiny-gpt2"""
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase , multi_process=lowercase , )
__UpperCamelCase = PyTorchBenchmark(lowercase )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = """sshleifer/tiny-gpt2"""
        config = AutoConfig.from_pretrained(MODEL_ID )
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__UpperCamelCase = PyTorchBenchmark(lowercase , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = """sshleifer/tinier_bart"""
        config = AutoConfig.from_pretrained(MODEL_ID )
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__UpperCamelCase = PyTorchBenchmark(lowercase , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = """sshleifer/tiny-gpt2"""
        config = AutoConfig.from_pretrained(MODEL_ID )
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__UpperCamelCase = PyTorchBenchmark(lowercase , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = """sshleifer/tinier_bart"""
        config = AutoConfig.from_pretrained(MODEL_ID )
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
__UpperCamelCase = PyTorchBenchmark(lowercase , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , save_to_csv=lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(lowercase , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(lowercase , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(lowercase , """train_time.csv""" ) , env_info_csv_file=os.path.join(lowercase , """env.csv""" ) , multi_process=lowercase , )
__UpperCamelCase = PyTorchBenchmark(lowercase )
benchmark.run()
self.assertTrue(Path(os.path.join(lowercase , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , """env.csv""" ) ).exists() )
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(lowercase ):
self.assertTrue(hasattr(lowercase , """sequential""" ) )
self.assertTrue(hasattr(lowercase , """cumulative""" ) )
self.assertTrue(hasattr(lowercase , """current""" ) )
self.assertTrue(hasattr(lowercase , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase , """log.txt""" ) , log_print=lowercase , trace_memory_line_by_line=lowercase , multi_process=lowercase , )
__UpperCamelCase = PyTorchBenchmark(lowercase )
__UpperCamelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowercase , """log.txt""" ) ).exists() )
| 601 | 1 |
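# The same benchmark API the tests above exercise, outside unittest. This is
# a sketch (requires torch); the flag values mirror the first test case, and
# the booleans are spelled out here because the obfuscated test bodies above
# lost them.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

benchmark_args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(benchmark_args).run()
print(results.time_inference_result)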
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    """simple docstring"""
    config = BertConfig.from_json_file(bert_config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 285 |
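# Programmatic equivalent of the CLI above; the paths are placeholders, not
# real files (the script is normally invoked with --tf_checkpoint_path,
# --bert_config_file and --pytorch_dump_path):
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/bert_model.ckpt",
    bert_config_file="/path/to/bert_config.json",
    pytorch_dump_path="/path/to/pytorch_model.bin",
)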
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    """simple docstring"""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk( checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_50=False ):
    """simple docstring"""
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = '''relu'''
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is a mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path) | 285 | 1 |
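# Typical invocation sketch; the paths are placeholders. For a fine-tuned
# mBART-50 checkpoint you would also pass mbart_50=True and finetuned=True:
model = convert_fairseq_mbart_checkpoint_from_disk(
    "/path/to/model.pt", hf_config_path="facebook/mbart-large-cc25"
)
model.save_pretrained("/path/to/output_dir")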
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class TextClassification( TaskTemplate ):
    '''simple docstring'''
    task: str = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""text""": Value("""string""" )} )
    label_schema: ClassVar[Features] = Features({"""labels""": ClassLabel} )
    text_column: str = "text"
    label_column: str = "labels"
    def align_with_features( self , features ):
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["""labels"""] = features[self.label_column]
        task_template.__dict__["""label_schema"""] = label_schema
        return task_template
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 608 |
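# How the template is used in practice: align the generic label schema with a
# dataset's concrete ClassLabel (sketch; requires the `datasets` library):
from datasets import ClassLabel, Features, Value

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
task = TextClassification()
aligned = task.align_with_features(features)
print(aligned.label_schema["labels"].names)  # ['neg', 'pos']
print(task.column_mapping)                   # {'text': 'text', 'labels': 'labels'}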
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    '''simple docstring'''
    def __init__( self , parent , config_class=None , has_text_modality=True , common_properties=None , **kwargs ):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        common_properties = (
            ['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers''']
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(['''vocab_size'''] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config , prop ) , msg=f'`{prop}` does not exist' )
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties ):
            try:
                setattr(config , name , idx )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=f'`{name} value {idx} expected, but was {getattr(config , name )}' )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties ):
            try:
                config = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=f'`{name} value {idx} expected, but was {getattr(config , name )}' )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string( self ):
        config = self.config_class(**self.inputs_dict )
        obj = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , value )
    def create_and_test_config_to_json_file( self ):
        config_first = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''config.json''' )
            config_first.to_json_file(json_file_path )
            config_second = self.config_class.from_json_file(json_file_path )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_from_and_save_pretrained( self ):
        config_first = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname )
            config_second = self.config_class.from_pretrained(tmpdirname )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_from_and_save_pretrained_subfolder( self ):
        config_first = self.config_class(**self.inputs_dict )
        subfolder = '''test'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configs_path = os.path.join(tmpdirname , subfolder )
            config_first.save_pretrained(sub_configs_path )
            config_second = self.config_class.from_pretrained(tmpdirname , subfolder=subfolder )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_with_num_labels( self ):
        config = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.id2label ) , 5 )
        self.parent.assertEqual(len(config.label2id ) , 5 )
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label ) , 3 )
        self.parent.assertEqual(len(config.label2id ) , 3 )
    def check_config_can_be_init_without_params( self ):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config )
    def check_config_arguments_init( self ):
        kwargs = copy.deepcopy(config_common_kwargs )
        config = self.config_class(**kwargs )
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.float16) )
            elif getattr(config , key ) != value:
                wrong_values.append((key, getattr(config , key ), value) )
        if len(wrong_values ) > 0:
            errors = "\n".join([f'- {v[0]}: got {v[1]} instead of {v[2]}' for v in wrong_values] )
            raise ValueError(f'The following keys were not properly set in the config:\n{errors}' )
    def run_common_tests( self ):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 608 | 1 |
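# How the tester is wired into a model's test file (sketch; BertConfig
# stands in for any concrete config class):
import unittest
from transformers import BertConfig

class BertConfigTest(unittest.TestCase):
    def setUp(self):
        self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()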
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int , classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('''aer_simulator''')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 709 |
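# What the printed counts mean: with no gates applied the qubit stays in
# |0>, so all 1000 shots read '0'. Sketch, assuming a pre-1.0 qiskit with
# the Aer provider as used above:
counts = single_qubit_measure(1, 1)
print(counts)  # e.g. {'0': 1000}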
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number) + 1) , 2)
    return not any(not number % i for i in odd_numbers)
def next_prime(value , factor=1 , **kwargs):
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs)
    return value
| 475 | 0 |
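# Quick checks of the two helpers above (the optional desc=True kwarg
# searches downward instead of upward):
print(is_prime(29))               # True
print(next_prime(14))             # 17
print(next_prime(17))             # 19 - already prime, so the *next* one
print(next_prime(14, desc=True))  # 13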
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowercase_ = logging.get_logger(__name__)
class DonutFeatureExtractor( DonutImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 669 |
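# The shim above only warns and then delegates. Instantiating it surfaces the
# deprecation message (sketch; assumes the default image-processor arguments
# are acceptable and that no other warning fires during __init__):
import warnings as _warnings

with _warnings.catch_warnings(record=True) as caught:
    _warnings.simplefilter("always")
    DonutFeatureExtractor()
print(caught[0].category.__name__)  # FutureWarning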
def count_divisors( n ):
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution( ):
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
| 669 | 1 |
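# count_divisors exploits the divisor-count formula on the prime
# factorization: d(p1^a1 * ... * pk^ak) = (a1+1) * ... * (ak+1).
# For 28 = 2^2 * 7^1 that gives (2+1)*(1+1) = 6 divisors:
print(count_divisors(28))        # 6 -> 1, 2, 4, 7, 14, 28
# The first triangle number with more than 500 divisors is 76576500:
print(count_divisors(76576500))  # 576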
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 154 |
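# Why all schedulers share one namespace: they are drop-in replacements for
# each other inside a pipeline. Sketch (requires `diffusers` and `torch`;
# the checkpoint name is illustrative):
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)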
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
def __lowercase ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase : int =[
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__UpperCamelCase : Optional[Any] =dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
__UpperCamelCase : Optional[int] =['l o 123', 'lo w 1456', 'e r</w> 1789', '']
__UpperCamelCase : List[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(lowerCamelCase__ ) )
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : List[Any] ='lower newer'
__UpperCamelCase : int ='lower newer'
return input_text, output_text
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =XLMTokenizer(self.vocab_file , self.merges_file )
__UpperCamelCase : str ='lower'
__UpperCamelCase : int =['low', 'er</w>']
__UpperCamelCase : List[str] =tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[str] =tokens + ['<unk>']
__UpperCamelCase : Optional[Any] =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
__UpperCamelCase : Optional[int] =tokenizer.encode('sequence builders' , add_special_tokens=lowerCamelCase__ )
__UpperCamelCase : str =tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
__UpperCamelCase : List[str] =tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 154 | 1 |
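# What the toy vocab/merges in setUp encode: the BPE merges segment "lower"
# into "low" + "er</w>" (</w> marks a word boundary). Self-contained
# re-creation of the fixture (requires transformers and its sacremoses
# dependency):
import json, os, tempfile
from transformers import XLMTokenizer

vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>",
         "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>",
         "newer</w>", "wider</w>", "<unk>"]
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
tmp = tempfile.mkdtemp()
vocab_file = os.path.join(tmp, "vocab.json")
merges_file = os.path.join(tmp, "merges.txt")
with open(vocab_file, "w") as fp:
    json.dump(dict(zip(vocab, range(len(vocab)))), fp)
with open(merges_file, "w") as fp:
    fp.write("\n".join(merges))
tok = XLMTokenizer(vocab_file, merges_file)
print(tok.tokenize("lower"))                                  # ['low', 'er</w>']
print(tok.convert_tokens_to_ids(["low", "er</w>", "<unk>"]))  # [14, 15, 20]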
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple=13 , SCREAMING_SNAKE_CASE_ : List[Any]=10 , SCREAMING_SNAKE_CASE_ : str=3 , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : int=2 , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : Any=32 , SCREAMING_SNAKE_CASE_ : str=5 , SCREAMING_SNAKE_CASE_ : Optional[Any]=4 , SCREAMING_SNAKE_CASE_ : Dict=37 , SCREAMING_SNAKE_CASE_ : int="gelu" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=10 , SCREAMING_SNAKE_CASE_ : str=0.0_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]="divided_space_time" , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_frames
lowerCamelCase__ = is_training
lowerCamelCase__ = use_labels
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = attention_type
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scope
lowerCamelCase__ = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowerCamelCase__ = (image_size // patch_size) ** 2
lowerCamelCase__ = (num_frames) * self.num_patches_per_frame + 1
def __UpperCAmelCase ( self : Optional[int] ):
lowerCamelCase__ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : List[Any] ):
lowerCamelCase__ = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowerCamelCase__ = self.num_labels
return config
def __UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
lowerCamelCase__ = TimesformerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ):
lowerCamelCase__ = TimesformerForVideoClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE_ )
# verify the logits shape
lowerCamelCase__ = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : Tuple ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class TimesformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
snake_case = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case = False
snake_case = False
snake_case = False
snake_case = False
    def setUp( self ):
        self.model_tester = TimesformerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37 )
def __UpperCAmelCase ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str]=False ):
lowerCamelCase__ = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
return inputs_dict
def __UpperCAmelCase ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def __UpperCAmelCase ( self : List[str] ):
pass
def __UpperCAmelCase ( self : int ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def __UpperCAmelCase ( self : Dict ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ = [*signature.parameters.keys()]
lowerCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : List[str] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : List[Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def __UpperCAmelCase ( self : Any ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TimesformerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : Optional[Any] ):
if not self.has_attentions:
pass
else:
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = True
for model_class in self.all_model_classes:
lowerCamelCase__ = self.model_tester.seq_length
lowerCamelCase__ = self.model_tester.num_frames
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = True
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase__ = True
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE_ )
# Check attention is always last and order is fine
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __UpperCAmelCase ( self : Optional[Any] ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ):
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase__ = outputs.hidden_states
lowerCamelCase__ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def prepare_video( ):
    """simple docstring"""
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class TimesformerModelIntegrationTest( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self : Dict ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self : List[str] ):
lowerCamelCase__ = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = self.default_image_processor
lowerCamelCase__ = prepare_video()
lowerCamelCase__ = image_processor(video[:8] , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowerCamelCase__ = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 129 |
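# Standalone inference sketch with the same checkpoint the integration test
# uses (requires torch; weights download on first run). The random frames
# are a stand-in for a real video clip:
import numpy as np
import torch
from transformers import TimesformerForVideoClassification, VideoMAEImageProcessor

video = list(np.random.randint(0, 256, (8, 3, 224, 224), dtype=np.uint8))
processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")
inputs = processor(video, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])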
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester( unittest.TestCase ):
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str]=7 , SCREAMING_SNAKE_CASE_ : Tuple=3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=18 , SCREAMING_SNAKE_CASE_ : Optional[int]=30 , SCREAMING_SNAKE_CASE_ : Optional[int]=400 , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : str=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_ : Any=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_ : Tuple=False , ):
lowerCamelCase__ = size if size is not None else {"""height""": 20, """width""": 20}
lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = image_size
lowerCamelCase__ = min_resolution
lowerCamelCase__ = max_resolution
lowerCamelCase__ = do_resize
lowerCamelCase__ = size
lowerCamelCase__ = do_center_crop
lowerCamelCase__ = crop_size
lowerCamelCase__ = do_normalize
lowerCamelCase__ = image_mean
lowerCamelCase__ = image_std
lowerCamelCase__ = do_reduce_labels
def __UpperCAmelCase ( self : Optional[int] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs( ):
    """simple docstring"""
    dataset = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
    image = Image.open(dataset[0]["""file"""] )
    map = Image.open(dataset[1]["""file"""] )
    return image, map
def prepare_semantic_batch_inputs( ):
    """simple docstring"""
    ds = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
    image1 = Image.open(ds[0]["""file"""] )
    map1 = Image.open(ds[1]["""file"""] )
    image2 = Image.open(ds[2]["""file"""] )
    map2 = Image.open(ds[3]["""file"""] )
    return [image1, image2], [map1, map2]
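# What the two fixture helpers above feed the processor: ADE20k images paired
# with their segmentation maps, preprocessed together. Minimal sketch
# (requires torch and network access for the fixtures; shapes are the
# processor's defaults, so treat them as illustrative):
image, seg_map = prepare_semantic_single_inputs()
processor = BeitImageProcessor(do_reduce_labels=True)
encoding = processor(image, seg_map, return_tensors="pt")
print(encoding["pixel_values"].shape)  # e.g. torch.Size([1, 3, 224, 224])
print(encoding["labels"].shape)        # e.g. torch.Size([1, 224, 224])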
@require_torch
@require_vision
class BeitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = BeitImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Optional[int] ):
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_resize""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """size""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_center_crop""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """center_crop""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_normalize""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_mean""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_std""" ) )
def __UpperCAmelCase ( self : Tuple ):
lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=SCREAMING_SNAKE_CASE_ )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : Dict ):
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, seg_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, seg_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, seg_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 129 | 1 |
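# Why `test_reduce_labels` above expects a maximum of 255: with reduced labels the
# BEiT processor maps the ADE20k background class 0 to the ignore index 255 and
# shifts every other class down by one. A minimal numpy sketch of that transform
# (an illustration, not the library implementation):
import numpy as np

def reduce_label(label: np.ndarray) -> np.ndarray:
    label = label.copy()
    label[label == 0] = 255  # background becomes the ignore index
    label = label - 1        # shift the remaining classes down by one
    label[label == 254] = 255  # keep previously-ignored pixels at 255
    return label

assert reduce_label(np.array([0, 1, 150])).tolist() == [255, 0, 149]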
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702 |
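# The block above is the stock lazy-import idiom: `_LazyModule` defers the heavy
# sentencepiece import until `GPTSw3Tokenizer` is first accessed. Plain Python can
# approximate the same behavior with a module-level __getattr__ (PEP 562); a
# minimal sketch, not the transformers implementation:
import importlib

def __getattr__(name):
    if name == "GPTSw3Tokenizer":
        module = importlib.import_module(".tokenization_gpt_sw3", __name__)
        return module.GPTSw3Tokenizer
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")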
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """
    Utility class for storing learned text embeddings for classifier free sampling
    """

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using VQ Diffusion.
    """

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings):
        super().__init__()

        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Set the lowest probabilities to -inf (i.e. log(0)) so that at most `truncation_rate`
        cumulative probability is kept per latent pixel."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
| 322 | 0 |
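# The guidance arithmetic from the denoising loop above, on dummy log-probabilities.
# Two predictions are made per step (unconditional and text-conditioned); the guided
# prediction extrapolates away from the unconditional one, then is renormalized with
# logsumexp so it remains a valid log-distribution over codebook entries.
# Illustrative shapes only, not pipeline code:
import torch

guidance_scale = 5.0
log_p_uncond = torch.log_softmax(torch.randn(2, 4096, 1024), dim=1)
log_p_text = torch.log_softmax(torch.randn(2, 4096, 1024), dim=1)

guided = log_p_uncond + guidance_scale * (log_p_text - log_p_uncond)
guided = guided - torch.logsumexp(guided, dim=1, keepdim=True)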
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """
    Video classification pipeline using any `AutoModelForVideoClassification`. This pipeline predicts the class of a
    video.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 81 |
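# Frame selection in `preprocess` above: with `num_frames` frames requested at a
# given `frame_sampling_rate`, indices are spread evenly over the first
# num_frames * frame_sampling_rate frames of the clip. A small check of that
# arithmetic (illustrative values only):
import numpy as np

num_frames, frame_sampling_rate = 8, 4
indices = np.linspace(0, num_frames * frame_sampling_rate - 1, num=num_frames, dtype=np.int64)
assert indices.tolist() == [0, 4, 8, 13, 17, 22, 26, 31]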
def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors (Project Euler 12)."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
| 318 | 0 |
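# `count_divisors` above relies on the standard identity: if n = p1**e1 * ... * pk**ek,
# then n has (e1 + 1) * ... * (ek + 1) divisors. Worked example: 28 = 2**2 * 7**1,
# so it has (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28).
assert count_divisors(28) == 6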
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 160 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 160 | 1 |
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Return the last `n` digits of 28433 * 2**7830457 + 1 (Project Euler 97)."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(10) = }''')
| 608 |
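# The three-argument pow above performs modular exponentiation, so 2**7830457 is
# reduced modulo 10**n at every step and the full ~2.4-million-digit number is
# never materialized. A small illustration of the equivalence:
assert pow(2, 100, 10**10) == (2**100) % 10**10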
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
            assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 608 | 1 |
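# Typical usage mirrored by the tests above: a FileLock guards a critical section
# across processes, and a bounded acquire raises Timeout instead of blocking
# forever. A minimal sketch (assuming the same FileLock/Timeout API as above):
from datasets.utils.filelock import FileLock, Timeout

lock = FileLock("/tmp/example.lock")
try:
    with lock.acquire(timeout=1):
        pass  # do work that must not run concurrently
except Timeout:
    print("another process is holding /tmp/example.lock")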
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price with tax added, e.g. price_plus_tax(100, 0.25) == 125.0."""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(1_00, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 432 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse an edge-list file into a dict: {node: [[neighbour, distance], ...]}."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting from the file's first node."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate 2-swap neighbours of `solution`; each entry ends with its total distance."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Improve `first_solution` for `iters` iterations with a tabu list of length `size`."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)

    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
lowerCAmelCase : Any = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 432 | 1 |
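# The script above expects an edge list: one "node_a node_b distance" triple per
# line. A hypothetical input file and invocation (file name and contents are
# illustrative only):
#
#     $ cat tabudata.txt
#     a b 20
#     a c 18
#     b c 10
#     b d 22
#     c d 12
#
#     $ python tabu_search.py -f tabudata.txt -i 4 -s 3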
from ..utils import DummyObject, requires_backends
class UpperCamelCase_ ( metaclass=UpperCamelCase__ ):
lowerCamelCase_ = ["flax"]
def __init__( self :Dict , *__A :List[str] , **__A :Dict ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _snake_case ( cls :Dict , *__A :Dict , **__A :Union[str, Any] ) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _snake_case ( cls :Tuple , *__A :Optional[Any] , **__A :str ) -> Any:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCamelCase_ ( metaclass=UpperCamelCase__ ):
lowerCamelCase_ = ["flax"]
def __init__( self :Dict , *__A :List[str] , **__A :str ) -> List[str]:
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _snake_case ( cls :Any , *__A :Tuple , **__A :Optional[int] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _snake_case ( cls :int , *__A :Optional[Any] , **__A :Any ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCamelCase_ ( metaclass=UpperCamelCase__ ):
lowerCamelCase_ = ["flax"]
def __init__( self :List[Any] , *__A :Any , **__A :List[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _snake_case ( cls :Tuple , *__A :Optional[Any] , **__A :List[Any] ) -> Any:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _snake_case ( cls :Any , *__A :List[str] , **__A :int ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCamelCase_ ( metaclass=UpperCamelCase__ ):
lowerCamelCase_ = ["flax"]
def __init__( self :int , *__A :Tuple , **__A :str ) -> str:
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _snake_case ( cls :List[str] , *__A :List[str] , **__A :Optional[Any] ) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _snake_case ( cls :List[Any] , *__A :Any , **__A :List[Any] ) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCamelCase_ ( metaclass=UpperCamelCase__ ):
lowerCamelCase_ = ["flax"]
def __init__( self :Any , *__A :List[Any] , **__A :int ) -> str:
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _snake_case ( cls :Union[str, Any] , *__A :Dict , **__A :str ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _snake_case ( cls :List[str] , *__A :Tuple , **__A :List[str] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCamelCase_ ( metaclass=UpperCamelCase__ ):
lowerCamelCase_ = ["flax"]
def __init__( self :Optional[Any] , *__A :Optional[int] , **__A :Union[str, Any] ) -> int:
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _snake_case ( cls :Dict , *__A :Any , **__A :Union[str, Any] ) -> Any:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _snake_case ( cls :Optional[Any] , *__A :List[Any] , **__A :Dict ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCamelCase_ ( metaclass=UpperCamelCase__ ):
lowerCamelCase_ = ["flax"]
def __init__( self :Any , *__A :int , **__A :str ) -> List[str]:
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _snake_case ( cls :str , *__A :int , **__A :Any ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _snake_case ( cls :int , *__A :List[Any] , **__A :Dict ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCamelCase_ ( metaclass=UpperCamelCase__ ):
lowerCamelCase_ = ["flax"]
def __init__( self :int , *__A :Optional[Any] , **__A :str ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _snake_case ( cls :int , *__A :str , **__A :int ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _snake_case ( cls :Any , *__A :Union[str, Any] , **__A :int ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCamelCase_ ( metaclass=UpperCamelCase__ ):
lowerCamelCase_ = ["flax"]
def __init__( self :Optional[Any] , *__A :Optional[Any] , **__A :Optional[Any] ) -> str:
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _snake_case ( cls :Tuple , *__A :List[str] , **__A :int ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _snake_case ( cls :Dict , *__A :List[Any] , **__A :Any ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCamelCase_ ( metaclass=UpperCamelCase__ ):
lowerCamelCase_ = ["flax"]
def __init__( self :Dict , *__A :Optional[Any] , **__A :List[str] ) -> Dict:
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _snake_case ( cls :Any , *__A :List[Any] , **__A :Dict ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _snake_case ( cls :List[Any] , *__A :Optional[int] , **__A :List[str] ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCamelCase_ ( metaclass=UpperCamelCase__ ):
lowerCamelCase_ = ["flax"]
def __init__( self :Any , *__A :Optional[Any] , **__A :List[Any] ) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _snake_case ( cls :Dict , *__A :Optional[Any] , **__A :Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _snake_case ( cls :Union[str, Any] , *__A :str , **__A :str ) -> int:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCamelCase_ ( metaclass=UpperCamelCase__ ):
lowerCamelCase_ = ["flax"]
def __init__( self :int , *__A :List[str] , **__A :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _snake_case ( cls :List[Any] , *__A :Optional[Any] , **__A :Tuple ) -> int:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _snake_case ( cls :List[str] , *__A :Optional[Any] , **__A :Any ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCamelCase_ ( metaclass=UpperCamelCase__ ):
lowerCamelCase_ = ["flax"]
def __init__( self :Tuple , *__A :Dict , **__A :List[Any] ) -> str:
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _snake_case ( cls :Optional[int] , *__A :Any , **__A :int ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _snake_case ( cls :int , *__A :Tuple , **__A :List[str] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
| 6 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter). It turns text into a sequence of characters, and then
    converts each character into its Unicode code point.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, bos_token=chr(CLS), eos_token=chr(SEP), sep_token=chr(SEP), cls_token=chr(CLS), pad_token=chr(PAD), mask_token=chr(MASK), add_prefix_space=False, model_max_length=2048, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, model_max_length=model_max_length, **kwargs)

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file, so there is nothing to save.
        return ()
| 6 | 1 |
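# CANINE needs no vocabulary file because tokenization above is pure Unicode
# arithmetic: a character's id is its code point, and decoding is just `chr`.
text = "hi"
ids = [ord(ch) for ch in text]
assert ids == [104, 105]
assert "".join(chr(i) for i in ids) == text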
from math import pow
def backtrack(needed_sum, power, current_number, current_sum, solutions_count):
    """Count subsets of {1**power, 2**power, ...} that sum exactly to needed_sum."""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum, power):
    """Return the number of ways to write needed_sum as a sum of distinct powers."""
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
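# Example for `solve` above: with power=2 the only way to write 13 as a sum of
# squares of distinct positive integers is 2**2 + 3**2, so one solution is counted.
assert solve(13, 2) == 1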
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-large-en-ro''': 1_024,
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs)

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 143 | 0 |
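# How the language-code bookkeeping above is used in practice: choose the source
# language when tokenizing, then force the target language code as the first
# generated token. A sketch of the documented NLLB pattern (checkpoint name and
# example text are illustrative):
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("Hello, world!", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["fra_Latn"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True))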
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_highlights(self):
        """A story with highlights is split into story lines and summary lines."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 543 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
_A = 'http://www.mocksite.com/file1.txt'
_A = '"text": ["foo", "foo"]'
_A = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class lowerCamelCase :
'''simple docstring'''
a = 2_0_0
a = {"Content-Length": "100"}
a = {}
def lowerCAmelCase_ ( self : Union[str, Any] , **_snake_case : str ) -> Any:
return [bytes(_snake_case , "utf-8" )]
def SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
return MockResponse()
@pytest.mark.parametrize("urls_type" , [str, list, dict] )
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
import requests
monkeypatch.setattr(__UpperCAmelCase , "request" , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = URL
if issubclass(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = url
elif issubclass(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = [url]
elif issubclass(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = {"train": url}
SCREAMING_SNAKE_CASE__ = "dummy"
SCREAMING_SNAKE_CASE__ = "downloads"
SCREAMING_SNAKE_CASE__ = tmp_path
SCREAMING_SNAKE_CASE__ = DownloadConfig(
cache_dir=os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , use_etag=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE__ = DownloadManager(dataset_name=__UpperCAmelCase , download_config=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = dl_manager.download(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = [downloaded_paths]
SCREAMING_SNAKE_CASE__ = [urls]
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
assert "train" in downloaded_paths.keys()
SCREAMING_SNAKE_CASE__ = downloaded_paths.values()
SCREAMING_SNAKE_CASE__ = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__UpperCAmelCase , __UpperCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
SCREAMING_SNAKE_CASE__ = Path(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
SCREAMING_SNAKE_CASE__ = downloaded_path.read_text()
assert content == CONTENT
SCREAMING_SNAKE_CASE__ = downloaded_path.with_suffix(".json" )
assert metadata_downloaded_path.exists()
SCREAMING_SNAKE_CASE__ = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" , [str, list, dict] )
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
SCREAMING_SNAKE_CASE__ = str(__UpperCAmelCase )
if issubclass(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = filename
elif issubclass(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = [filename]
elif issubclass(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = {"train": filename}
SCREAMING_SNAKE_CASE__ = "dummy"
SCREAMING_SNAKE_CASE__ = xz_file.parent
SCREAMING_SNAKE_CASE__ = "extracted"
SCREAMING_SNAKE_CASE__ = DownloadConfig(
cache_dir=__UpperCAmelCase , use_etag=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE__ = DownloadManager(dataset_name=__UpperCAmelCase , download_config=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = dl_manager.extract(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = paths
for extracted_paths in [extracted_paths]:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = [extracted_paths]
SCREAMING_SNAKE_CASE__ = [paths]
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
assert "train" in extracted_paths.keys()
SCREAMING_SNAKE_CASE__ = extracted_paths.values()
SCREAMING_SNAKE_CASE__ = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__UpperCAmelCase , __UpperCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
SCREAMING_SNAKE_CASE__ = Path(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__UpperCAmelCase , etag=__UpperCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
SCREAMING_SNAKE_CASE__ = extracted_path.read_text()
SCREAMING_SNAKE_CASE__ = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 159 | 0 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        '''simple docstring'''
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))
        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)
        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        '''simple docstring'''
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        '''simple docstring'''
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))
        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # Check that we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        '''simple docstring'''
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        '''simple docstring'''
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")
        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt")) | 240 |
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
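# Worked example (added for illustration): for n = 10, the sum of squares
# 1^2 + ... + 10^2 = 385 and the square of the sum (1 + ... + 10)^2 = 55^2 = 3025,
# so solution(10) == 3025 - 385 == 2640.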
if __name__ == "__main__":
print(f'''{solution() = }''') | 240 | 1 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
A__ = 0
@slow
def UpperCamelCase ( self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase,(BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_lowerCAmelCase ),0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase,(GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_lowerCAmelCase ),0 )
def UpperCamelCase ( self ):
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size,12 )
def UpperCamelCase ( self ):
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase,(RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size,20 )
def UpperCamelCase ( self ):
A__ = AutoConfig.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase,_lowerCAmelCase )
# Check that tokenizer_type ≠ model_type
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase,config=_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size,12 )
def UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''',os.path.join(_lowerCAmelCase,'''vocab.txt''' ) )
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase,tokenizer_type='''bert''',use_fast=_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase,_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''',os.path.join(_lowerCAmelCase,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''',os.path.join(_lowerCAmelCase,'''merges.txt''' ) )
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase,tokenizer_type='''gpt2''',use_fast=_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase,_lowerCAmelCase )
@require_tokenizers
def UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''',os.path.join(_lowerCAmelCase,'''vocab.txt''' ) )
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase,tokenizer_type='''bert''' )
self.assertIsInstance(_lowerCAmelCase,_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''',os.path.join(_lowerCAmelCase,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''',os.path.join(_lowerCAmelCase,'''merges.txt''' ) )
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase,tokenizer_type='''gpt2''' )
self.assertIsInstance(_lowerCAmelCase,_lowerCAmelCase )
def UpperCamelCase ( self ):
with pytest.raises(_lowerCAmelCase ):
AutoTokenizer.from_pretrained('''./''',tokenizer_type='''xxx''' )
@require_tokenizers
def UpperCamelCase ( self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A__ = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_lowerCAmelCase,(BertTokenizer, BertTokenizerFast) )
if isinstance(_lowerCAmelCase,_lowerCAmelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case,_lowerCAmelCase )
else:
self.assertEqual(tokenizer.do_lower_case,_lowerCAmelCase )
self.assertEqual(tokenizer.model_max_length,512 )
@require_tokenizers
def UpperCamelCase ( self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_lowerCAmelCase,'''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''',):
A__ = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def UpperCamelCase ( self ):
A__ = TOKENIZER_MAPPING.values()
A__ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_lowerCAmelCase )
@require_tokenizers
def UpperCamelCase ( self ):
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''',use_fast=_lowerCAmelCase ),_lowerCAmelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ),_lowerCAmelCase )
@require_tokenizers
def UpperCamelCase ( self ):
A__ = AutoTokenizer.from_pretrained('''distilbert-base-uncased''',do_lower_case=_lowerCAmelCase )
A__ = '''Hello, world. How are you?'''
A__ = tokenizer.tokenize(_lowerCAmelCase )
self.assertEqual('''[UNK]''',tokens[0] )
A__ = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''',do_lower_case=_lowerCAmelCase )
A__ = tokenizer.tokenize(_lowerCAmelCase )
self.assertEqual('''[UNK]''',tokens[0] )
@require_tokenizers
def UpperCamelCase ( self ):
A__ = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_lowerCAmelCase ),_lowerCAmelCase )
self.assertEqual(tokenizer.model_max_length,512 )
self.assertEqual(tokenizer.vocab_size,3_0000 )
self.assertEqual(tokenizer.unk_token,'''[UNK]''' )
self.assertEqual(tokenizer.padding_side,'''right''' )
self.assertEqual(tokenizer.truncation_side,'''right''' )
def UpperCamelCase ( self ):
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase,(BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase )
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase,tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size,12 )
def UpperCamelCase ( self ):
A__ = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_lowerCAmelCase,_lowerCAmelCase )
def UpperCamelCase ( self ):
A__ = get_tokenizer_config('''bert-base-cased''' )
A__ = config.pop('''_commit_hash''',_lowerCAmelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_lowerCAmelCase,{'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A__ = get_tokenizer_config(_lowerCAmelCase )
self.assertDictEqual(_lowerCAmelCase,{} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase )
A__ = get_tokenizer_config(_lowerCAmelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''],'''BertTokenizer''' )
def UpperCamelCase ( self ):
try:
AutoConfig.register('''custom''',_lowerCAmelCase )
AutoTokenizer.register(_lowerCAmelCase,slow_tokenizer_class=_lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCAmelCase ):
AutoTokenizer.register(_lowerCAmelCase,slow_tokenizer_class=_lowerCAmelCase )
A__ = CustomTokenizer.from_pretrained(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase )
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase,_lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def UpperCamelCase ( self ):
try:
AutoConfig.register('''custom''',_lowerCAmelCase )
# Can register in two steps
AutoTokenizer.register(_lowerCAmelCase,slow_tokenizer_class=_lowerCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig],(CustomTokenizer, None) )
AutoTokenizer.register(_lowerCAmelCase,fast_tokenizer_class=_lowerCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig],(CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_lowerCAmelCase,slow_tokenizer_class=_lowerCAmelCase,fast_tokenizer_class=_lowerCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig],(CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCAmelCase ):
AutoTokenizer.register(_lowerCAmelCase,fast_tokenizer_class=_lowerCAmelCase )
        # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
        # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = BertTokenizerFast.from_pretrained(_lowerCAmelCase )
bert_tokenizer.save_pretrained(_lowerCAmelCase )
A__ = CustomTokenizerFast.from_pretrained(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase )
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase,_lowerCAmelCase )
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase,use_fast=_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase,_lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCamelCase ( self ):
with self.assertRaises(_lowerCAmelCase ):
A__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowerCAmelCase ):
A__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''',trust_remote_code=_lowerCAmelCase )
A__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''',trust_remote_code=_lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase )
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase,trust_remote_code=_lowerCAmelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__,'''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__,'''NewTokenizerFast''' )
# Test we can also load the slow version
A__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''',trust_remote_code=_lowerCAmelCase,use_fast=_lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__,'''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase )
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase,trust_remote_code=_lowerCAmelCase,use_fast=_lowerCAmelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__,'''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__,'''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__,'''NewTokenizer''' )
@require_tokenizers
def UpperCamelCase ( self ):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
AutoConfig.register('''custom''',_lowerCAmelCase )
AutoTokenizer.register(_lowerCAmelCase,slow_tokenizer_class=_lowerCAmelCase )
AutoTokenizer.register(_lowerCAmelCase,fast_tokenizer_class=_lowerCAmelCase )
# If remote code is not set, the default is to use local
A__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
A__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''',use_fast=_lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''',trust_remote_code=_lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
A__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''',trust_remote_code=_lowerCAmelCase,use_fast=_lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
A__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''',trust_remote_code=_lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__,'''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
A__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''',trust_remote_code=_lowerCAmelCase,use_fast=_lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__,'''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCamelCase ( self ):
A__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''',trust_remote_code=_lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__,'''NewTokenizerFast''' )
# Test we can also load the slow version
A__ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''',trust_remote_code=_lowerCAmelCase,use_fast=_lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__,'''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__,'''NewTokenizer''' )
def UpperCamelCase ( self ):
with self.assertRaisesRegex(
_lowerCAmelCase,'''bert-base is not a local folder and is not a valid model identifier''' ):
A__ = AutoTokenizer.from_pretrained('''bert-base''' )
def UpperCamelCase ( self ):
with self.assertRaisesRegex(
_lowerCAmelCase,r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
A__ = AutoTokenizer.from_pretrained(_lowerCAmelCase,revision='''aaaaaa''' )
def UpperCamelCase ( self ):
A__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
A__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count,0 )
self.assertEqual(counter.head_request_count,1 )
self.assertEqual(counter.other_request_count,0 )
| 190 | import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized model and tokenizer using a pretrained config."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeqaSeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
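# Hedged usage sketch (script name assumed; extra --key value pairs are forwarded
# to the config as overrides):
#   python save_randomly_initialized_model.py t5-small ./t5-small-random --d_model 64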
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 524 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
A__: List[Any] = None
A__: List[str] = logging.get_logger(__name__)
A__: Union[str, Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
A__: Any = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
A__: str = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
A__: Tuple = '''▁'''
# Segments (not really needed)
A__: int = 0
A__: Tuple = 1
A__: int = 2
A__: Dict = 3
A__: Union[str, Any] = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        '''simple docstring'''
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
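    # Illustrative note (added): XLNet puts the classification token at the end, so a
    # sequence pair is laid out as `A <sep> B <sep> <cls>` with token type ids
    # `0 ... 0  1 ... 1  2` - the trailing 2 is the dedicated CLS segment id.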
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 506 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
A__: List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
        '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool(string: str) -> bool:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"could not parse string as bool {string}" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
A__: Any = parser.parse_args()
A__: Union[str, Any] = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 506 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    token = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def final():
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1000
    repo_id = 'huggingface/label-files'
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='dataset' ) ), 'r' ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    config = CvtConfig(num_labels=num_labels, idalabel=idalabel, labelaid=labelaid )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/', 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/', 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('cpu' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=3_8_4,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCAmelCase_ : int = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 365 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    '''simple docstring'''

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        """simple docstring"""
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        """simple docstring"""
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, 'total_steps'):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        """simple docstring"""
        return self.scheduler.get_last_lr()

    def state_dict(self):
        """simple docstring"""
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        """simple docstring"""
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        """simple docstring"""
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        """simple docstring"""
        return self.scheduler.print_lr(*args, **kwargs)
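# Hedged usage sketch (added; variable names assumed): wrap an existing torch
# scheduler so it only advances once the wrapped optimizer has really stepped,
# e.g. under gradient accumulation:
#
#   scheduler = AcceleratedScheduler(lr_scheduler, optimizers=[optimizer])
#   for batch in dataloader:
#       ...
#       optimizer.step()
#       scheduler.step()  # no-op on accumulation steps where the optimizer skipped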
| 365 | 1 |
def is_arithmetic_series(series: list) -> bool:
    """simple docstring"""
    if not isinstance(series, list):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''')
    if len(series) == 0:
        raise ValueError('''Input list must be a non empty list''')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """simple docstring"""
    if not isinstance(series, list):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''')
    if len(series) == 0:
        raise ValueError('''Input list must be a non empty list''')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
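# Minimal examples (added for illustration):
#   is_arithmetic_series([2, 4, 6])  -> True   (common difference 2)
#   is_arithmetic_series([2, 4, 7])  -> False
#   arithmetic_mean([2, 4, 6])       -> 4.0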
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    """simple docstring"""
    if "emb" in name:
        name = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
    if "transformer" in name:
        name = name.replace('''transformer''' , '''model.decoder''' )
    if "cross_attention" in name:
        name = name.replace('''cross_attention''' , '''encoder_attn''' )
    if "linear1" in name:
        name = name.replace('''linear1''' , '''fc1''' )
    if "linear2" in name:
        name = name.replace('''linear2''' , '''fc2''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''self_attn_layer_norm''' )
    if "norm_cross" in name:
        name = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''final_layer_norm''' )
    if "out_norm" in name:
        name = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
    if "linears" in name:
        name = name.replace('''linears''' , '''lm_heads''' )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
    return name
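# Illustrative example (added): a fairseq key such as
# "transformer.layers.0.linear1.weight" is renamed by the substitutions above to
# "model.decoder.layers.0.fc1.weight".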
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """simple docstring"""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    """simple docstring"""
    if checkpoint == "small":
        # default config values
        hidden_size = 10_24
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 15_36
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 20_48
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
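# Illustrative mapping (added): "small" -> 1024 hidden / 24 layers / 16 heads,
# "medium" -> 1536 / 48 / 24, "large" -> 2048 / 48 / 32, with ffn_dim always
# four times the hidden size.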
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_="cpu" ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = MusicGen.get_pretrained(lowercase_ , device=lowercase_ )
__UpperCamelCase = decoder_config_from_checkpoint(lowercase_ )
__UpperCamelCase = fairseq_model.lm.state_dict()
__UpperCamelCase , __UpperCamelCase = rename_state_dict(
lowercase_ , hidden_size=decoder_config.hidden_size )
__UpperCamelCase = TaEncoderModel.from_pretrained('''t5-base''' )
__UpperCamelCase = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
__UpperCamelCase = MusicgenForCausalLM(lowercase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__UpperCamelCase , __UpperCamelCase = decoder.load_state_dict(lowercase_ , strict=lowercase_ )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
if len(lowercase_ ) > 0:
raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
__UpperCamelCase = MusicgenForConditionalGeneration(text_encoder=lowercase_ , audio_encoder=lowercase_ , decoder=lowercase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase_ )
# check we can do a forward pass
__UpperCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__UpperCamelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__UpperCamelCase = model(input_ids=lowercase_ , decoder_input_ids=lowercase_ ).logits
if logits.shape != (8, 1, 20_48):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
__UpperCamelCase = AutoTokenizer.from_pretrained('''t5-base''' )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
__UpperCamelCase = MusicgenProcessor(feature_extractor=lowercase_ , tokenizer=lowercase_ )
# set the appropriate bos/pad token ids
__UpperCamelCase = 20_48
__UpperCamelCase = 20_48
# set other default generation config params
__UpperCamelCase = int(30 * audio_encoder.config.frame_rate )
__UpperCamelCase = True
__UpperCamelCase = 3.0
if pytorch_dump_folder is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if repo_id:
logger.info(F"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(lowercase_ )
processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
a_ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 375 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowerCamelCase : str = logging.get_logger(__name__)
# General docstring
lowerCamelCase : Dict = "MobileNetV1Config"
# Base docstring
lowerCamelCase : Union[str, Any] = "google/mobilenet_v1_1.0_224"
lowerCamelCase : Dict = [1, 1_024, 7, 7]
# Image classification docstring
lowerCamelCase : Any = "google/mobilenet_v1_1.0_224"
lowerCamelCase : str = "tabby, tabby cat"
lowerCamelCase : List[Any] = [
"google/mobilenet_v1_1.0_224",
"google/mobilenet_v1_0.75_192",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    '''simple docstring'''
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = 'MobilenetV1/Conv2d_0/'
    tf_to_pt_map[prefix + 'weights'] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + 'BatchNorm/beta'] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + 'BatchNorm/gamma'] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
        tf_to_pt_map[prefix + 'depthwise_weights'] = pointer.convolution.weight
        tf_to_pt_map[prefix + 'BatchNorm/beta'] = pointer.normalization.bias
        tf_to_pt_map[prefix + 'BatchNorm/gamma'] = pointer.normalization.weight
        tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
        tf_to_pt_map[prefix + 'weights'] = pointer.convolution.weight
        tf_to_pt_map[prefix + 'BatchNorm/beta'] = pointer.normalization.bias
        tf_to_pt_map[prefix + 'BatchNorm/gamma'] = pointer.normalization.weight
        tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
        tf_to_pt_map[prefix + 'weights'] = model.classifier.weight
        tf_to_pt_map[prefix + 'biases'] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
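# A quick standalone sanity check (toy sizes assumed) that apply_tf_padding reproduces the
# TensorFlow "SAME" output size rule, ceil(input_size / stride):
#
#     import math
#
#     conv = nn.Conv2d(3, 8, kernel_size=3, stride=2, padding=0)
#     features = torch.randn(1, 3, 7, 7)
#     out = conv(apply_tf_padding(features, conv))
#     assert out.shape[-1] == math.ceil(7 / 2)  # 4, as TF "SAME" padding would produce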
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: Union[bool, str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            # pointwise 1x1 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
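# A small standalone illustration (toy inputs assumed) of the `problem_type` inference in the
# forward pass above: one output means regression, integer labels mean single-label
# classification, and float label vectors mean multi-label classification.
#
#     def infer_problem_type(num_labels, labels):
#         if num_labels == 1:
#             return "regression"
#         if num_labels > 1 and labels.dtype in (torch.long, torch.int):
#             return "single_label_classification"
#         return "multi_label_classification"
#
#     assert infer_problem_type(1, torch.tensor([0.5])) == "regression"
#     assert infer_problem_type(3, torch.tensor([2])) == "single_label_classification"
#     assert infer_problem_type(3, torch.tensor([[0.0, 1.0, 1.0]])) == "multi_label_classification"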
| 70 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i
class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
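# A standalone sketch of the ordering property these tests verify: distributed evaluation must
# return every sample exactly once and in dataset order (fake gathered arrays assumed).
#
#     import numpy as np
#
#     dataset_length = 7
#     predictions = np.arange(dataset_length)  # what a correct distributed gather produces
#     label_ids = np.arange(dataset_length)
#     sequential = list(range(dataset_length))
#     assert predictions.tolist() == sequential and label_ids.tolist() == sequential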
| 535 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
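# A minimal usage sketch (default values assumed from the signature above). `hidden_size`
# resolves to `d_model` through `attribute_map`, and the ONNX config exposes the dynamic axes:
#
#     config = ConditionalDetrConfig()
#     onnx_config = ConditionalDetrOnnxConfig(config)
#     assert config.hidden_size == config.d_model == 256
#     assert list(onnx_config.inputs) == ["pixel_values", "pixel_mask"]
#     assert onnx_config.atol_for_validation == 1e-5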
| 83 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 83 | 1 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                continue
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no pretrained list to check
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary file
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # Perceiver can only accept one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)
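    # A standalone sketch of the byte-level scheme these tests exercise: Perceiver ids are raw
    # UTF-8 bytes shifted by the number of special tokens (an offset of 6 is assumed from the
    # expected ids above, with [CLS] = 4 and [SEP] = 5).
    #
    #     def perceiver_like_ids(text, offset=6):
    #         return [4] + [b + offset for b in text.encode("utf-8")] + [5]
    #
    #     assert perceiver_like_ids("Unicode €.") == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]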
| 74 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
"Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
]
    tgt = [
"Margot Frank, died in 1945, a month earlier than previously thought.",
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
" the final seconds on board Flight 9525.",
]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
"\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
]
    tgt = [
" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
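# A small usage sketch of why `newline_sep` matters for rougeLsum, as test_newline_cnn_improvement
# above checks: the scorer treats "\n" as a sentence boundary, so joining sentences with newlines
# before scoring raises the summary-level score on multi-sentence data.
#
#     score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=["rougeLsum"])["rougeLsum"]
#     score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=["rougeLsum"])["rougeLsum"]
#     print(score_sep, score_no_sep)  # the first is expected to be higher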
| 675 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
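# A minimal instantiation sketch under the config above. The `init_weights(rng)` call is an
# assumption based on how diffusers' Flax models expose parameter initialization; this is a
# sketch, not part of the test suite.
#
#     model = FlaxAutoencoderKL(
#         block_out_channels=[32, 64],
#         in_channels=3,
#         out_channels=3,
#         down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
#         up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
#         latent_channels=4,
#     )
#     params = model.init_weights(jax.random.PRNGKey(0))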
| 711 |
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2_000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
    def test_tokenizer_integration(self):
        sequences = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
        expected_encoding = {
            "input_ids": [
                [63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994],
                [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ],
            "token_type_ids": [
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ],
            "attention_mask": [
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ],
        }
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
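    # A standalone sketch of the SentencePiece byte fallback the tests above exercise: characters
    # missing from the vocab are split into UTF-8 byte pieces, e.g. <0xC3><0xA9> for "é".
    #
    #     def byte_fallback_pieces(char):
    #         return [f"<0x{b:02X}>" for b in char.encode("utf-8")]
    #
    #     assert byte_fallback_pieces("é") == ["<0xC3>", "<0xA9>"]
    #     assert byte_fallback_pieces("9") == ["<0x39>"]  # matches the "92000" tokens above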
| 362 | 0 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
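# A standalone sketch of the stratified splitting that feeds this function (toy labels assumed):
# each validation fold preserves the class ratio of the full label set.
#
#     from sklearn.model_selection import StratifiedKFold
#     import numpy as np
#
#     labels = np.array([0, 1, 0, 1, 0, 1])
#     kfold = StratifiedKFold(n_splits=3)
#     for train_idxs, valid_idxs in kfold.split(np.zeros(len(labels)), labels):
#         print(train_idxs, valid_idxs)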
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
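# A standalone sketch of the fold-ensembling step above (toy tensors assumed): per-fold logits
# are averaged before the argmax, so each fold votes with its full logit vector.
#
#     fold_logits = [torch.tensor([[2.0, 0.0]]), torch.tensor([[0.0, 1.0]]), torch.tensor([[0.0, 2.0]])]
#     preds = torch.stack(fold_logits, dim=0).sum(dim=0).div(len(fold_logits)).argmax(dim=-1)
#     assert preds.tolist() == [1]  # averaged logits are [2/3, 1], so class 1 wins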
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 497 |
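The row above ensembles the folds by soft voting: the per-fold test logits are stacked, summed, divided by the fold count, and only then arg-maxed. A minimal standalone sketch of that reduction (shapes and values below are illustrative, not taken from the script):

import torch

# Assumption: three folds, each holding (num_examples, num_classes) test logits.
fold_logits = [torch.randn(8, 2) for _ in range(3)]
# Stack to (num_folds, num_examples, num_classes), average over folds, then argmax.
averaged = torch.stack(fold_logits, dim=0).mean(dim=0)
predictions = averaged.argmax(dim=-1)  # shape: (num_examples,)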
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        """Computes accuracy on a batch of predictions"""
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 497 | 1 |
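The script above attaches its torchvision transforms lazily via `set_transform`, which runs the callable on each batch at access time instead of materializing a new dataset. A minimal standalone sketch of that mechanism (toy columns, no images):

from datasets import Dataset

ds = Dataset.from_dict({"value": [1, 2, 3]})

def double(batch):
    # The transform always receives a batched dict of lists.
    batch["value"] = [v * 2 for v in batch["value"]]
    return batch

ds.set_transform(double)
print(ds[0])  # {'value': 2} — computed on access; the underlying data is unchanged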
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder, used to scale
    image embeddings to a standardized range and back.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
| 522 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
"""tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"""TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TapasForMaskedLM""",
"""TapasForQuestionAnswering""",
"""TapasForSequenceClassification""",
"""TapasModel""",
"""TapasPreTrainedModel""",
"""load_tf_weights_in_tapas""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"""TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFTapasForMaskedLM""",
"""TFTapasForQuestionAnswering""",
"""TFTapasForSequenceClassification""",
"""TFTapasModel""",
"""TFTapasPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 522 | 1 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step/max_step) after which the complex
    number made of this x-y-pair diverges. Members of the Mandelbrot set do
    not diverge, so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """
    Black-and-white color-coding that ignores the relative distance: the
    Mandelbrot set is black, everything else is white.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """
    Color-coding that takes the relative distance into account; the
    Mandelbrot set itself is black.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """
    Generate an image of the Mandelbrot set.
    """
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
    img.show()
| 52 |
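A short usage sketch of the generator above: the same function renders a zoomed, black-and-white section, mirroring the commented-out examples (the output filename is illustrative):

img = get_image(
    figure_center_x=-0.6,
    figure_center_y=-0.4,
    figure_width=0.8,
    use_distance_color_coding=False,
)
img.save("mandelbrot_zoom.png")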
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 633 | 0 |
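The core of `_compute` above is one formula: perplexity is the exponential of the token-masked mean negative log-likelihood. A minimal sketch of just that reduction on dummy logits (no model download; shapes are illustrative):

import torch
from torch.nn import CrossEntropyLoss

logits = torch.randn(1, 5, 100)          # (batch, seq_len, vocab_size) — dummy values
labels = torch.randint(0, 100, (1, 5))   # (batch, seq_len)
mask = torch.ones(1, 5)                  # 1 = real token, 0 = padding
loss_fct = CrossEntropyLoss(reduction="none")
nll = loss_fct(logits.transpose(1, 2), labels) * mask  # per-token NLL, padding zeroed
perplexity = torch.exp(nll.sum(1) / mask.sum(1))       # one value per sequence
print(perplexity)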
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 156 |
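The trunk validation above enforces one invariant: each state dimension must factor exactly into heads times head width. A minimal standalone sketch of that rule (values taken from the defaults above):

sequence_state_dim, sequence_head_width = 1024, 32
sequence_num_heads = sequence_state_dim // sequence_head_width
# 1024-wide states split into 32-wide heads yields exactly 32 heads, no remainder.
assert sequence_state_dim % sequence_head_width == 0
assert sequence_state_dim == sequence_num_heads * sequence_head_width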
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
| 156 | 1 |
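A hedged usage sketch mirroring the small-model test above (the model id comes from the test; the printed scores depend on the randomly initialized checkpoint):

import numpy as np
from transformers import pipeline

audio_classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
audio = {
    "array": np.ones((8000,), dtype=np.float32),
    "sampling_rate": audio_classifier.feature_extractor.sampling_rate,
}
print(audio_classifier(audio, top_k=2))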
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 667 |
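The `_import_structure` pattern above defers heavy submodule imports until a name is first accessed. This is not the actual `_LazyModule` implementation, but a minimal sketch of the same idea using PEP 562 module-level `__getattr__` (the mapped module and names below are a toy example):

import importlib

_import_structure = {"json": ["dumps", "loads"]}  # module name -> exported symbols

def __getattr__(name):
    # Resolve a symbol lazily: import its module only on first attribute access.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")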
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 667 | 1 |
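A short usage sketch of the config above: like any `PretrainedConfig` subclass it serializes to a plain dict and round-trips through the base-class `to_dict`/`from_dict` API (the attribute checked below is one of the defaults):

config = MgpstrConfig(max_token_length=27)
restored = MgpstrConfig.from_dict(config.to_dict())
assert restored.max_token_length == config.max_token_length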
'''simple docstring'''
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 707 | '''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize the image, rounding the (height, width) dimensions down to the closest multiple of size_divisor.
        """
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Rescale the image by the given scaling factor `scale`.
        """
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 320 | 0 |
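The `resize` method above rounds each spatial dimension down to the nearest multiple of `size_divisor` before interpolating. A standalone sketch of just that rounding rule:

def divisible_size(height: int, width: int, size_divisor: int = 32) -> tuple:
    # Floor division followed by multiplication snaps each side to the divisor grid.
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor

assert divisible_size(225, 479) == (224, 448)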
'''simple docstring'''
def hamming(n_element: int) -> list:
    """
    Return an ordered list of the first n_element Hamming numbers (numbers
    whose only prime factors are 2, 3 and 5), starting at 1.
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
print("-----------------------------------------------------")
print(F'''The list with nth numbers is: {hamming_numbers}''')
print("-----------------------------------------------------")
| 261 |
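An alternative sketch of the same series using a min-heap and a seen-set; under the definition above (1 is the first term) it produces identical output:

import heapq

def hamming_heap(n_element: int) -> list:
    heap, seen, out = [1], {1}, []
    while len(out) < n_element:
        value = heapq.heappop(heap)          # smallest candidate so far
        out.append(value)
        for factor in (2, 3, 5):
            candidate = value * factor
            if candidate not in seen:        # avoid duplicate candidates like 2*3 and 3*2
                seen.add(candidate)
                heapq.heappush(heap, candidate)
    return out

assert hamming_heap(5) == [1, 2, 3, 4, 5]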
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size

                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase__ ( self ) -> int:
"""simple docstring"""
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
    def test_generation_pre_attn_layer_norm( self ):
        """simple docstring"""
        model_id = '''facebook/opt-125m'''
        EXPECTED_OUTPUTS = [
            '''Today is a beautiful day and I want to''',
            '''In the city of New York, the city''',
            '''Paris is the capital of France and the capital''',
            '''Computers and mobile phones have taken over the''',
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors='''tf''' ).input_ids
            generated_ids = model.generate(input_ids , max_length=10 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
    def test_batch_generation( self ):
        """simple docstring"""
        model_id = '''facebook/opt-350m'''
        tokenizer = GPTaTokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        tokenizer.padding_side = '''left'''
        # use different length sentences to test batching
        sentences = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]
        inputs = tokenizer(sentences , return_tensors='''tf''' , padding=True )
        input_ids = inputs['''input_ids''']
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs['''attention_mask'''] )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs['''attention_mask'''][-1] , tf.int64 ) )
        inputs_padded = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            '''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
            '''Today, I was in the middle of a conversation with a friend about the''',
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
    def test_generation_post_attn_layer_norm( self ):
        """simple docstring"""
        model_id = '''facebook/opt-350m'''
        EXPECTED_OUTPUTS = [
            '''Today is a beautiful day and I want to''',
            '''In the city of San Francisco, the city''',
            '''Paris is the capital of France and the capital''',
            '''Computers and mobile phones have taken over the''',
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors='''tf''' ).input_ids
            generated_ids = model.generate(input_ids , max_length=10 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
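# --- Editor's illustrative sketch (editor's addition; assumes the same public checkpoint used above) ---
# The batch-generation test above works because padding is applied on the LEFT:
# decoder-only models like OPT continue from the last position, so pad tokens
# must not sit between the prompt and the generated continuation.
def _sketch_left_padded_generation():
    from transformers import AutoTokenizer, TFOPTForCausalLM

    tokenizer = AutoTokenizer.from_pretrained('''facebook/opt-350m''' )
    tokenizer.padding_side = '''left'''  # critical for batched decoder-only generation
    model = TFOPTForCausalLM.from_pretrained('''facebook/opt-350m''' )
    batch = tokenizer(['''Hello, my dog is a little''', '''Today, I'''] , return_tensors='''tf''' , padding=True )
    outputs = model.generate(input_ids=batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] )
    return tokenizer.batch_decode(outputs , skip_special_tokens=True )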
| 261 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def __UpperCAmelCase( self ):
__A : Optional[int] = self.get_tokenizer()
__A : str = self.get_rust_tokenizer()
__A : Any = self.get_image_processor()
__A : List[Any] = CLIPProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
__A : str = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase )
__A : List[str] = CLIPProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
__A : Optional[Any] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A : Optional[int] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__A : Tuple = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
__A : Tuple = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Dict = self.get_tokenizer()
__A : List[Any] = CLIPProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__A : List[str] = self.prepare_image_inputs()
__A : int = image_processor(__UpperCAmelCase , return_tensors="np" )
__A : List[str] = processor(images=__UpperCAmelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase( self ):
__A : Tuple = self.get_image_processor()
__A : List[Any] = self.get_tokenizer()
__A : Optional[Any] = CLIPProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__A : Optional[int] = "lower newer"
__A : Optional[Any] = processor(text=__UpperCAmelCase )
__A : Any = tokenizer(__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase( self ):
__A : Optional[int] = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : Optional[int] = CLIPProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__A : Dict = "lower newer"
__A : str = self.prepare_image_inputs()
__A : Optional[Any] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def __UpperCAmelCase( self ):
__A : Optional[Any] = self.get_image_processor()
__A : List[str] = self.get_tokenizer()
__A : Dict = CLIPProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__A : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Tuple = processor.batch_decode(__UpperCAmelCase )
__A : Optional[Any] = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : List[Any] = self.get_image_processor()
__A : Any = self.get_tokenizer()
__A : Optional[int] = CLIPProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__A : Union[str, Any] = "lower newer"
__A : List[str] = self.prepare_image_inputs()
__A : int = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
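# --- Editor's illustrative sketch (editor's addition; assumes the public openai/clip-vit-base-patch32 checkpoint) ---
# The tests above verify that CLIPProcessor simply routes `text=` to the tokenizer
# and `images=` to the image processor and merges the two feature dicts:
def _sketch_clip_processor_usage():
    import numpy as np
    from transformers import CLIPProcessor

    processor = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
    image = np.zeros((3, 224, 224) , dtype=np.uint8 )  # any PIL image or channel-first array works
    inputs = processor(text='''a photo of a cat''' , images=image , return_tensors='''np''' )
    # input_ids / attention_mask come from the tokenizer, pixel_values from the image processor
    return sorted(inputs.keys() )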
| 702 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_reduce_labels=False , ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
    image = Image.open(dataset[0]["file"] )
    map = Image.open(dataset[1]["file"] )
    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
    image1 = Image.open(ds[0]["file"] )
    map1 = Image.open(ds[1]["file"] )
    image2 = Image.open(ds[2]["file"] )
    map2 = Image.open(ds[3]["file"] )
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = BeitImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "center_crop" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 20, "width": 20} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        self.assertEqual(image_processor.do_reduce_labels , False )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=True )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
        self.assertEqual(image_processor.do_reduce_labels , True )
def __UpperCAmelCase( self ):
pass
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
__A : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__A : List[Any] = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__A : List[Any] = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__A : Union[str, Any] = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
__A : Tuple = []
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__A : str = image_processing(image_inputs[0] , maps[0] , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched
__A : Any = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test not batched input (PIL images)
__A , __A : Optional[Any] = prepare_semantic_single_inputs()
__A : Dict = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched input (PIL images)
__A , __A : List[Any] = prepare_semantic_batch_inputs()
__A : Tuple = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
def __UpperCAmelCase( self ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__A , __A : List[Any] = prepare_semantic_single_inputs()
__A : Optional[int] = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 150 )
__A : Optional[Any] = True
__A : int = image_processing(__UpperCAmelCase , __UpperCAmelCase , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
| 387 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class CTRLConfig ( PretrainedConfig ):
"""simple docstring"""
a__ : Any = "ctrl"
a__ : Tuple = ["past_key_values"]
a__ : Union[str, Any] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[Any] , __lowerCAmelCase : Tuple=24_65_34 , __lowerCAmelCase : str=2_56 , __lowerCAmelCase : int=12_80 , __lowerCAmelCase : int=81_92 , __lowerCAmelCase : str=48 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : List[Any]=1E-6 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : str=True , **__lowerCAmelCase : List[Any] , ) -> List[str]:
_A = vocab_size
_A = n_positions
_A = n_embd
_A = n_layer
_A = n_head
_A = dff
_A = resid_pdrop
_A = embd_pdrop
_A = layer_norm_epsilon
_A = initializer_range
_A = use_cache
super().__init__(**__lowerCAmelCase )
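# --- Editor's illustrative sketch (editor's addition; relies on PretrainedConfig's documented `attribute_map` handling) ---
# The `attribute_map` above lets callers use the canonical transformers names
# while the config stores CTRL's native ones:
def _sketch_ctrl_attribute_aliases():
    config = CTRLConfig()  # defaults defined above: n_positions=256, n_embd=1280, n_layer=48
    assert config.max_position_embeddings == config.n_positions == 256
    assert config.hidden_size == config.n_embd == 1280
    assert config.num_hidden_layers == config.n_layer == 48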
| 2 |
'''simple docstring'''
import re
def indian_phone_validator( phone: str ) -> bool:
    """simple docstring"""
    pat = re.compile(R"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$" )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
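# --- Editor's illustrative examples (editor's addition; outcomes follow from the regex above) ---
#   indian_phone_validator('9876543210')     -> True  (bare 10-digit number starting with 9)
#   indian_phone_validator('+911234567890')  -> False (subscriber number must start with 7, 8 or 9)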
| 421 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node :
    '''simple docstring'''
    def __init__( self , value: int ) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum :
    '''simple docstring'''
    def __init__( self , tree: Node ) -> None:
        self.tree = tree
    def depth_first_search( self , node: Node | None ) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__( self ) -> Iterator[int]:
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
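# --- Editor's illustrative usage (editor's addition, using the classes defined above) ---
def _sketch_node_sum() -> int:
    tree = Node(10 )
    tree.left = Node(5 )
    tree.right = Node(-3 )
    # iterating BinaryTreeNodeSum yields the sum of all node values: 10 + 5 - 3 = 12
    return next(iter(BinaryTreeNodeSum(tree ) ) )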
| 713 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
lowercase__ : Any = TFRoFormerModel(config=__lowerCAmelCase )
lowercase__ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase__ : Union[str, Any] = [input_ids, input_mask]
lowercase__ : Union[str, Any] = model(__lowerCAmelCase )
lowercase__ : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
lowercase__ : Optional[Any] = True
lowercase__ : str = TFRoFormerForCausalLM(config=__lowerCAmelCase )
lowercase__ : Dict = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ : Dict = model(__lowerCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
lowercase__ : List[str] = TFRoFormerForMaskedLM(config=__lowerCAmelCase )
lowercase__ : int = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ : int = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
lowercase__ : Optional[int] = self.num_labels
lowercase__ : Tuple = TFRoFormerForSequenceClassification(config=__lowerCAmelCase )
lowercase__ : int = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ : Any = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
lowercase__ : Union[str, Any] = self.num_choices
lowercase__ : Dict = TFRoFormerForMultipleChoice(config=__lowerCAmelCase )
lowercase__ : List[str] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase__ : Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase__ : List[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase__ : Dict = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase__ : str = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
lowercase__ : Optional[int] = self.num_labels
lowercase__ : List[str] = TFRoFormerForTokenClassification(config=__lowerCAmelCase )
lowercase__ : int = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ : Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
lowercase__ : Dict = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase )
lowercase__ : Optional[int] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ : Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : List[str] = TFRoFormerModelTester(self )
lowercase__ : List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def _lowerCAmelCase( self ) -> Dict:
self.config_tester.run_common_tests()
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> str:
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> int:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Dict:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ : List[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class TFRoFormerModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_masked_lm( self ):
        model = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest ( unittest.TestCase ):
    '''simple docstring'''
    tolerance = 1e-4
    def test_basic( self ):
        input_ids = tf.constant([[4, 10]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emba_output = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emba_output , desired_weights , atol=self.tolerance )
    def test_positional_emb_weights_against_roformer( self ):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest ( unittest.TestCase ):
    '''simple docstring'''
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings( self ):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 768] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        expected_query = tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
        expected_key = tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key , atol=self.tolerance )
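# --- Editor's illustrative sketch (editor's addition; shows the standard rotary rotation identity) ---
# The expected tensors above follow the rotary rule applied to each pair of
# channels: rotated = x * cos(theta) + rotate_every_two(x) * sin(theta), where
# rotate_every_two maps (x1, x2, x3, x4, ...) to (-x2, x1, -x4, x3, ...).
def _sketch_rotate_every_two(x):
    import numpy as np

    x = np.asarray(x , dtype=np.float32 )
    rotated = np.empty_like(x )
    rotated[..., 0::2] = -x[..., 1::2]  # even slots receive the negated odd channels
    rotated[..., 1::2] = x[..., 0::2]   # odd slots receive the even channels
    return rotated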
| 428 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
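# Example invocation (illustrative checkpoint and output paths; editor's addition):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin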
| 425 | """simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__SCREAMING_SNAKE_CASE =TypeVar("T")
class Node ( Generic[T] ):
    def __init__( self , data: T ) -> None:
        '''simple docstring'''
        self.data = data
        self.next: Node[T] | None = None
    def __str__( self ) -> str:
        '''simple docstring'''
        return f'''{self.data}'''
class Stack ( Generic[T] ):
    def __init__( self ) -> None:
        '''simple docstring'''
        self.top: Node[T] | None = None
    def __iter__( self ) -> Iterator[T]:
        '''simple docstring'''
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__( self ) -> str:
        '''simple docstring'''
        return "->".join([str(item ) for item in self] )
    def __len__( self ) -> int:
        '''simple docstring'''
        return len(tuple(iter(self ) ) )
    def is_empty( self ) -> bool:
        '''simple docstring'''
        return self.top is None
    def push( self , item: T ) -> None:
        '''simple docstring'''
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self ) -> T:
        '''simple docstring'''
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek( self ) -> T:
        '''simple docstring'''
        if self.is_empty():
            raise IndexError('peek from empty stack' )
        assert self.top is not None
        return self.top.data
    def clear( self ) -> None:
        '''simple docstring'''
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
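# --- Editor's illustrative usage (editor's addition, using the Stack defined above) ---
def _sketch_stack_usage() -> None:
    stack: Stack[int] = Stack()
    for value in (1, 2, 3):
        stack.push(value )
    assert str(stack ) == "3->2->1"  # most recently pushed item is on top
    assert stack.peek() == 3
    assert stack.pop() == 3
    assert len(stack ) == 2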
| 425 | 1 |
"""simple docstring"""
def solution() -> int:
    constant = []
    i = 1
    while len(constant ) < 1E6:
        constant.append(str(i ) )
        i += 1
    constant = "".join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
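# For Champernowne's constant the product d1*d10*d100*d1000*d10000*d100000*d1000000 evaluates to 210 (Project Euler #40).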
| 716 |
"""simple docstring"""
def solution( n: int = 4000000 ):
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 637 | 0 |
from pathlib import Path
import fire
def minify( src_dir: str , dest_dir: str , n: int ) -> None:
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open('w' ).write('\n'.join(new ) )
if __name__ == "__main__":
fire.Fire(minify)
| 305 |
def reverse_long_words( sentence: str ) -> str:
    return " ".join(
        ''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 305 | 1 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
a_ : List[Any] = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path: str , pytorch_dump_folder_path: str ) -> None:
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    special_keys = ['key_proj', 'value_proj', 'query_proj']
    mapping = {
        'self_attn': 'ngram_self_attn',
        'cross_attn': 'encoder_attn',
        'cross_attn_layer_norm': 'encoder_attn_layer_norm',
        'feed_forward_layer_norm': 'final_layer_norm',
        'feed_forward': '',
        'intermediate': 'fc1',
        'output': 'fc2',
        'key_proj': 'k_proj',
        'query_proj': 'q_proj',
        'value_proj': 'v_proj',
        'word_embeddings': 'embed_tokens',
        'embeddings_layer_norm': 'emb_layer_norm',
        'relative_pos_embeddings': 'relative_linear',
        'ngram_embeddings': 'ngram_input_embed',
        'position_embeddings': 'embed_positions',
    }
    for key in loading_info["missing_keys"]:
        attributes = key.split('.' )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                    old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f'''{attribute} is initialized.''' )
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f'''{attribute} is initialized''' )
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model , 'in_proj_weight' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(f'''{old_model} does not have {old_attribute}''' )
                    old_model = getattr(old_model , old_attribute )
        if not is_key_init:
            raise ValueError(f'''{key} was not correctly initialized!''' )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a_ : List[str] = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
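# Example invocation (illustrative checkpoint name and output path; editor's addition):
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path microsoft/prophetnet-large-uncased \
#       --pytorch_dump_folder_path ./prophetnet-converted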
| 263 |
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_multiple_size=4 , hidden_act="gelu" , hidden_dropout=0.0 , attention_dropout=0.1 , weight_tying=True , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def snake_case_ ( self ):
__lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase : str = None
if self.use_input_mask:
__lowerCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase : Optional[Any] = None
if self.use_labels:
__lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase : int = self.get_config()
return config, input_ids, input_mask, token_labels
def snake_case_ ( self ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
def snake_case_ ( self ):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
__lowerCamelCase : Tuple = True
return config, input_ids, input_mask, token_labels
def snake_case_ ( self , __a , __a , __a ):
__lowerCamelCase : Any = GPTNeoXJapaneseModel(config=__a )
model.to(__a )
model.eval()
__lowerCamelCase : int = model(__a , attention_mask=__a )
__lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , __a , __a , __a ):
__lowerCamelCase : List[str] = True
__lowerCamelCase : Tuple = GPTNeoXJapaneseModel(__a )
model.to(__a )
model.eval()
__lowerCamelCase : Optional[Any] = model(__a , attention_mask=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , __a , __a , __a , __a ):
__lowerCamelCase : Optional[Any] = GPTNeoXJapaneseForCausalLM(config=__a )
model.to(__a )
model.eval()
__lowerCamelCase : List[str] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , __a , __a , __a ):
__lowerCamelCase : List[Any] = True
__lowerCamelCase : str = GPTNeoXJapaneseForCausalLM(config=__a )
model.to(__a )
model.eval()
# first forward pass
__lowerCamelCase : List[Any] = model(__a , attention_mask=__a , use_cache=__a )
__lowerCamelCase : Any = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowerCamelCase : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowerCamelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase : int = torch.cat([input_mask, next_mask] , dim=-1 )
__lowerCamelCase : int = model(__a , attention_mask=__a , output_hidden_states=__a )
__lowerCamelCase : Dict = output_from_no_past['hidden_states'][0]
__lowerCamelCase : Any = model(
__a , attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )['hidden_states'][0]
# select random slice
__lowerCamelCase : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCamelCase : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) )
def snake_case_ ( self ):
__lowerCamelCase : List[str] = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = config_and_inputs
__lowerCamelCase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
def snake_case_ ( self ):
__lowerCamelCase : List[Any] = GPTNeoXJapaneseModelTester(self )
__lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , hidden_size=37 )
def snake_case_ ( self ):
self.config_tester.run_common_tests()
def snake_case_ ( self ):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__a , __a , __a )
def snake_case_ ( self ):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__a , __a , __a )
def snake_case_ ( self ):
# This regression test was failing with PyTorch < 1.3
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowerCamelCase : Tuple = None
self.model_tester.create_and_check_model_as_decoder(__a , __a , __a )
def snake_case_ ( self ):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__a , __a , __a )
def snake_case_ ( self ):
__lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__a )
    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 263 | 1 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 506 |
"""simple docstring"""
from string import ascii_uppercase
_lowerCAmelCase :str = {str(ord(c) - 55): c for c in ascii_uppercase}
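# ord("A") == 65, so ord(c) - 55 maps "A".."Z" to the digit values 10..35 used for bases above 10.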
def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to the given base (2..36) and return it as a string."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 506 | 1 |
'''simple docstring'''
from __future__ import annotations
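# Applies the mass action law for semiconductors: intrinsic_conc**2 == electron_conc * hole_conc,
# so any one concentration can be recovered from the other two.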
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """Given exactly two of the three concentrations (pass 0 for the unknown one), return the missing quantity's name and value."""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
'''simple docstring'''
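# sum_divs[n] accumulates the proper divisors of n via a sieve, so abundant numbers
# (those with sum_divs[n] > n) can be collected in a single pass.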
def solution(limit: int = 28123) -> int:
    """Project Euler 23: sum of all positive integers that cannot be written as the sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
if __name__ == "__main__":
print(solution())
| 426 | 0 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
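# The try/except above lets this script run both as a package module and as a standalone file.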
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 445 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm: repeatedly replace (a, b) with (b, a mod b)."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive form of the same algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    """Demonstrate both implementations on a few pairs."""
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 445 | 1 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
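# The test class below feeds PIL, NumPy, and PyTorch inputs through the same resize/normalize
# pipeline and checks the resulting pixel_values shapes.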
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def _snake_case ( self ) -> Any:
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : str =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ : str =image_processing(__A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _snake_case ( self ) -> Any:
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : str =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] =image_processing(__A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _snake_case ( self ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Any =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ : int =image_processing(__A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 431 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)
    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
config.vocab_size += 2
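    # the word vocabulary grew by two rows (<ent> and <ent2>), so the config must match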
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] =LukeTokenizer.from_pretrained(UpperCAmelCase_ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE_ : List[str] =state_dict['''embeddings.word_embeddings.weight''']
SCREAMING_SNAKE_CASE_ : Optional[Any] =word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : Tuple =word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : List[str] =torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE_ : Optional[int] =state_dict['''entity_embeddings.entity_embeddings.weight''']
SCREAMING_SNAKE_CASE_ : List[str] =entity_emb[entity_vocab['''[MASK]''']]
SCREAMING_SNAKE_CASE_ : Any =LukeModel(config=UpperCAmelCase_ ).eval()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple =model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
if not (len(UpperCAmelCase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'Missing keys {", ".join(UpperCAmelCase_ )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
SCREAMING_SNAKE_CASE_ : int =LukeTokenizer.from_pretrained(UpperCAmelCase_ , task='''entity_classification''' )
SCREAMING_SNAKE_CASE_ : List[str] =(
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
SCREAMING_SNAKE_CASE_ : List[Any] =(3_9, 4_2)
SCREAMING_SNAKE_CASE_ : Tuple =tokenizer(UpperCAmelCase_ , entity_spans=[span] , add_prefix_space=UpperCAmelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =model(**UpperCAmelCase_ )
# Verify word hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE_ : Tuple =torch.Size((1, 4_2, 1_0_2_4) )
SCREAMING_SNAKE_CASE_ : Any =torch.tensor(
[[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] )
else: # base
SCREAMING_SNAKE_CASE_ : List[str] =torch.Size((1, 4_2, 7_6_8) )
SCREAMING_SNAKE_CASE_ : List[Any] =torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE_ : Any =torch.Size((1, 1, 1_0_2_4) )
SCREAMING_SNAKE_CASE_ : List[Any] =torch.tensor([[0.0_466, -0.0_106, -0.0_179]] )
else: # base
SCREAMING_SNAKE_CASE_ : Optional[int] =torch.Size((1, 1, 7_6_8) )
SCREAMING_SNAKE_CASE_ : Tuple =torch.tensor([[0.1_457, 0.1_044, 0.0_174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCAmelCase_ ) )
model.save_pretrained(UpperCAmelCase_ )
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
_lowercase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 431 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
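# Full determinism keeps the hard-coded expected slices in the tests below stable across runs.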
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : Any = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=3_2 , attention_head_dim=4 , )
_lowerCamelCase : str = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
_lowerCamelCase : List[str] = CLIPTextModel(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_lowerCamelCase : int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any=0 ):
"""simple docstring"""
_lowerCamelCase : str = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
if str(__lowerCAmelCase ).startswith('''mps''' ):
_lowerCamelCase : List[Any] = torch.manual_seed(__lowerCAmelCase )
else:
_lowerCamelCase : Tuple = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''video''': video,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : Dict = self.get_dummy_components()
_lowerCamelCase : int = VideoToVideoSDPipeline(**__lowerCAmelCase )
_lowerCamelCase : str = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Dict = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = '''np'''
_lowerCamelCase : Tuple = sd_pipe(**__lowerCAmelCase ).frames
_lowerCamelCase : Dict = frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
_lowerCamelCase : Dict = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowerCAmelCase , expected_max_diff=5E-3 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = VideoToVideoSDPipeline.from_pretrained('''cerspense/zeroscope_v2_XL''' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
_lowerCamelCase : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
_lowerCamelCase : Optional[int] = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=__lowerCAmelCase )
_lowerCamelCase : List[Any] = video.to('''cuda''' )
_lowerCamelCase : Tuple = '''Spiderman is surfing'''
_lowerCamelCase : Any = pipe(__lowerCAmelCase , video=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=3 , output_type='''pt''' ).frames
_lowerCamelCase : Dict = np.array([-1.0_45_89_84, -1.1_27_92_97, -0.9_66_30_86, -0.91_50_39_06, -0.75_09_76_56] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 83 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case ( _lowercase):
def __init__( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : int=1_3 , __lowerCAmelCase : Optional[int]=7 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=9_9 , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : Union[str, Any]=5 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Optional[int]=3_7 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : int=5_1_2 , __lowerCAmelCase : Tuple=1_6 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any="None" , __lowerCAmelCase : str=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Optional[Any]=None , ):
"""simple docstring"""
_lowerCamelCase : Dict = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : Optional[Any] = seq_length
_lowerCamelCase : Optional[Any] = is_training
_lowerCamelCase : Dict = use_input_mask
_lowerCamelCase : Tuple = use_token_type_ids
_lowerCamelCase : Optional[Any] = use_labels
_lowerCamelCase : List[str] = vocab_size
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : int = num_hidden_layers
_lowerCamelCase : Optional[Any] = num_attention_heads
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : int = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : List[Any] = type_sequence_label_size
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : Optional[int] = num_labels
_lowerCamelCase : Any = num_choices
_lowerCamelCase : int = relative_attention
_lowerCamelCase : Union[str, Any] = position_biased_input
_lowerCamelCase : str = pos_att_type
_lowerCamelCase : Tuple = scope
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : List[Any] = None
if self.use_input_mask:
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase : Any = None
_lowerCamelCase : int = None
_lowerCamelCase : Union[str, Any] = None
if self.use_labels:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : List[str] = DebertaVaModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )[0]
_lowerCamelCase : str = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )[0]
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = DebertaVaForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[int] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.num_labels
_lowerCamelCase : Dict = DebertaVaForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.num_labels
_lowerCamelCase : Tuple = DebertaVaForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Any = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[str] = DebertaVaForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = DebertaVaForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : List[Any] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
snake_case__ : int = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : List[str] = True
snake_case__ : List[Any] = False
snake_case__ : int = False
snake_case__ : Optional[Any] = False
snake_case__ : str = False
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : List[str] = DebertaVaModelTester(self )
_lowerCamelCase : Any = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Tuple = DebertaVaModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase):
@unittest.skip(reason='''Model not available yet''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
_lowerCamelCase : List[str] = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCamelCase : Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
# compare the actual values for a slice.
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
| 83 | 1 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
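# Pillow 9.1 moved the resampling filters under PIL.Image.Resampling; older releases expose them directly on PIL.Image.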
def pt_to_pil(images):
    """Convert a torch image batch with values in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch with values in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 500 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
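# The helper functions below mirror mmsegmentation's metric implementation (see the reference URL in the metric info).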
def intersect_and_union(
    pred_label,
    label,
    num_labels: int,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute per-class intersection and union areas for one (prediction, ground truth) pair."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        # shift labels down by one and send the background class (0) to the ignore value
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels: int,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersection/union areas over a whole dataset."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(
    results,
    gt_seg_maps,
    num_labels: int,
    ignore_index: int,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute mean IoU, mean accuracy, overall accuracy, and the per-category breakdowns."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
} ) , reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] , )
    def _compute(self, predictions, references, num_labels: int, ignore_index: int, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels
        )
        return iou_result
| 500 | 1 |
'''simple docstring'''
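# Dynamic programming over the grid: each cell accumulates the cheapest cost of reaching it
# from the top-left corner, moving only right or down.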
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 418 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase_ :
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str=3 , __lowerCamelCase : List[Any]=3_2 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : str=1_0 , __lowerCamelCase : Union[str, Any]=[8, 1_6, 3_2, 6_4] , __lowerCamelCase : Union[str, Any]=[1, 1, 2, 1] , __lowerCamelCase : List[str]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : str="relu" , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : List[str]=["stage2", "stage3", "stage4"] , __lowerCamelCase : List[Any]=[2, 3, 4] , __lowerCamelCase : int=1 , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = embeddings_size
_SCREAMING_SNAKE_CASE = hidden_sizes
_SCREAMING_SNAKE_CASE = depths
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = scope
_SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = out_features
_SCREAMING_SNAKE_CASE = out_indices
_SCREAMING_SNAKE_CASE = num_groups
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowerCAmelCase_ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowerCAmelCase_ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = BitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCamelCase_ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowerCamelCase_ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BitModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
return
@unittest.skip(reason="Bit does not output attentions" )
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase )
for name, module in model.named_modules():
if isinstance(__lowerCamelCase , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : int ):
_SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
_SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE = layer_type
_SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = BitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( ) -> Any:
_SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = self.default_image_processor
_SCREAMING_SNAKE_CASE = prepare_img()
_SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
_SCREAMING_SNAKE_CASE = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@require_torch
class lowercase_ ( A , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase_ = (BitBackbone,) if is_torch_available() else ()
lowerCamelCase_ = BitConfig
lowerCamelCase_ = False
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BitModelTester(self )
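# A minimal sketch of the out_features contract the backbone checks above
# exercise, using a hypothetical stage dict rather than the real BitBackbone
# API: out_features=None means only the last stage's feature map is returned.
import torch

def select_feature_maps(stages: dict, out_features=None):
    # stages maps stage names to (batch, channels, height, width) tensors
    if out_features is None:
        return [stages[list(stages)[-1]]]
    return [stages[name] for name in out_features]

stages = {f"stage{i}": torch.zeros(2, 8 * 2**i, 32 // 2**i, 32 // 2**i) for i in (1, 2, 3)}
assert len(select_feature_maps(stages)) == 1                        # last stage only
assert len(select_feature_maps(stages, ["stage1", "stage2"])) == 2  # explicit stages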
| 418 | 1 |
"""simple docstring"""
from collections import defaultdict
def UpperCamelCase ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
__a = first_str.lower().strip()
__a = second_str.lower().strip()
# Remove whitespace
__a = first_str.replace(""" """ , """""" )
__a = second_str.replace(""" """ , """""" )
# Strings of different lengths are not anagrams
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
return False
# Default values for count should be 0
__a = defaultdict(_lowerCAmelCase )
# For each character position, increment the count for the character in the
# first string and decrement it for the character in the second string
for i in range(len(_lowerCAmelCase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
__A = input("""Enter the first string """).strip()
__A = input("""Enter the second string """).strip()
__A = check_anagrams(input_a, input_b)
print(f'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
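# The fragment above counts characters in one pass with a defaultdict; an
# equivalent sketch using collections.Counter is shown below for comparison
# (check_anagrams_counter is a hypothetical helper, not part of the original).
from collections import Counter

def _normalize(s: str) -> str:
    return s.lower().replace(" ", "")

def check_anagrams_counter(first: str, second: str) -> bool:
    return Counter(_normalize(first)) == Counter(_normalize(second))

assert check_anagrams_counter("Silent", "Listen")
assert not check_anagrams_counter("apple", "pear")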
| 173 | """simple docstring"""
import random
from typing import Any
def UpperCamelCase ( _lowerCAmelCase : list ):
for _ in range(len(_lowerCAmelCase ) ):
__a = random.randint(0 , len(_lowerCAmelCase ) - 1 )
__a = random.randint(0 , len(_lowerCAmelCase ) - 1 )
__a , __a = data[b], data[a]
return data
if __name__ == "__main__":
__A = [0, 1, 2, 3, 4, 5, 6, 7]
__A = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 173 | 1 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def lowercase ( UpperCamelCase : str = "AAPL" ):
"""simple docstring"""
A__ : Union[str, Any] =F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
A__ : Dict =BeautifulSoup(requests.get(UpperCamelCase ).text , "html.parser" )
A__ : str ="My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 656 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__A : Any = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
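# The _import_structure/_LazyModule pattern above defers heavy imports until an
# attribute is first accessed. A simplified sketch of the same idea, placed at
# the top level of a module file and using PEP 562 module __getattr__ (this is
# not the real _LazyModule implementation):
import importlib

_lazy_structure = {"math": ["sqrt"], "json": ["dumps"]}

def __getattr__(name):
    for module_name, symbols in _lazy_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)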
| 656 | 1 |
import doctest
from collections import deque
import numpy as np
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = [2, 1, 2, -1]
lowerCAmelCase__ :List[str] = [1, 2, 3, 4]
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = len(self.first_signal )
lowerCAmelCase__ :int = len(self.second_signal )
lowerCAmelCase__ :Optional[Any] = max(__UpperCAmelCase , __UpperCAmelCase )
# create a zero matrix of max_length x max_length
lowerCAmelCase__ :Tuple = [[0] * max_length for i in range(__UpperCAmelCase )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ :Optional[int] = deque(self.second_signal )
rotated_signal.rotate(__UpperCAmelCase )
for j, item in enumerate(__UpperCAmelCase ):
matrix[i][j] += item
# multiply the matrix with the first signal
lowerCAmelCase__ :List[str] = np.matmul(np.transpose(__UpperCAmelCase ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(__UpperCAmelCase , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
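# The class above materializes an explicit rotation matrix, which is O(n^2) in
# memory; the same circular convolution can be computed in O(n log n) via the
# FFT (a standalone sketch, not the class's API):
import numpy as np

def circular_convolution(x, h):
    n = max(len(x), len(h))
    x = np.pad(x, (0, n - len(x)))
    h = np.pad(h, (0, n - len(h)))
    return np.real(np.fft.ifft(np.fft.fft(x) * np.fft.fft(h))).round(2)

print(circular_convolution([2, 1, 2, -1], [1, 2, 3, 4]))  # [10. 10.  6. 14.]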
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__A = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""DPTFeatureExtractor"""]
__A = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 560 | 0 |
def a__ ( A__ ):
return sum(i for i in range(1, number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
lowerCAmelCase__ : int =int(input('Enter number: ').strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 101 | '''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple = '''wavlm'''
def __init__( self : Optional[int] , lowerCAmelCase__ : int=3_2 , lowerCAmelCase__ : Any=7_6_8 , lowerCAmelCase__ : Any=1_2 , lowerCAmelCase__ : List[Any]=1_2 , lowerCAmelCase__ : List[Any]=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : Optional[int]=0.0 , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : Any=0.1 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Union[str, Any]=1e-5 , lowerCAmelCase__ : Optional[Any]="group" , lowerCAmelCase__ : str="gelu" , lowerCAmelCase__ : str=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowerCAmelCase__ : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase__ : int=(1_0, 3, 3, 3, 3, 2, 2) , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : str=1_2_8 , lowerCAmelCase__ : str=1_6 , lowerCAmelCase__ : Tuple=3_2_0 , lowerCAmelCase__ : Optional[int]=8_0_0 , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : Union[str, Any]=0.05 , lowerCAmelCase__ : Union[str, Any]=1_0 , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : Optional[int]=0.0 , lowerCAmelCase__ : Union[str, Any]=1_0 , lowerCAmelCase__ : List[str]=3_2_0 , lowerCAmelCase__ : List[Any]=2 , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Optional[Any]=1_0_0 , lowerCAmelCase__ : Union[str, Any]=2_5_6 , lowerCAmelCase__ : Tuple=2_5_6 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Any="mean" , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : List[str]=False , lowerCAmelCase__ : Optional[int]=2_5_6 , lowerCAmelCase__ : List[str]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowerCAmelCase__ : str=(5, 3, 3, 1, 1) , lowerCAmelCase__ : Optional[Any]=(1, 2, 3, 1, 1) , lowerCAmelCase__ : List[str]=5_1_2 , lowerCAmelCase__ : Tuple=8_0 , lowerCAmelCase__ : List[Any]=0 , lowerCAmelCase__ : Dict=1 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : str=False , lowerCAmelCase__ : Optional[int]=3 , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : Optional[Any]=3 , lowerCAmelCase__ : Optional[int]=None , **lowerCAmelCase__ : Dict , ) -> Any:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : int = feat_extract_norm
_UpperCAmelCase : Optional[int] = feat_extract_activation
_UpperCAmelCase : List[str] = list(lowerCAmelCase__ )
_UpperCAmelCase : Any = list(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = list(lowerCAmelCase__ )
_UpperCAmelCase : int = conv_bias
_UpperCAmelCase : Union[str, Any] = num_buckets
_UpperCAmelCase : Dict = max_bucket_distance
_UpperCAmelCase : int = num_conv_pos_embeddings
_UpperCAmelCase : str = num_conv_pos_embedding_groups
_UpperCAmelCase : Optional[Any] = len(self.conv_dim )
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : str = hidden_act
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : List[Any] = hidden_dropout
_UpperCAmelCase : Optional[Any] = attention_dropout
_UpperCAmelCase : List[Any] = activation_dropout
_UpperCAmelCase : int = feat_proj_dropout
_UpperCAmelCase : List[str] = final_dropout
_UpperCAmelCase : Optional[Any] = layerdrop
_UpperCAmelCase : Dict = layer_norm_eps
_UpperCAmelCase : Optional[Any] = initializer_range
_UpperCAmelCase : List[Any] = num_ctc_classes
_UpperCAmelCase : Optional[Any] = vocab_size
_UpperCAmelCase : Optional[Any] = do_stable_layer_norm
_UpperCAmelCase : str = use_weighted_layer_sum
_UpperCAmelCase : Optional[Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase : str = apply_spec_augment
_UpperCAmelCase : List[str] = mask_time_prob
_UpperCAmelCase : Tuple = mask_time_length
_UpperCAmelCase : Optional[int] = mask_time_min_masks
_UpperCAmelCase : str = mask_feature_prob
_UpperCAmelCase : Tuple = mask_feature_length
# parameters for pretraining with codevector quantized representations
_UpperCAmelCase : List[Any] = num_codevectors_per_group
_UpperCAmelCase : Tuple = num_codevector_groups
_UpperCAmelCase : Optional[Any] = contrastive_logits_temperature
_UpperCAmelCase : Optional[int] = num_negatives
_UpperCAmelCase : List[Any] = codevector_dim
_UpperCAmelCase : int = proj_codevector_dim
_UpperCAmelCase : Optional[Any] = diversity_loss_weight
# ctc loss
_UpperCAmelCase : Dict = ctc_loss_reduction
_UpperCAmelCase : int = ctc_zero_infinity
# adapter
_UpperCAmelCase : Optional[Any] = add_adapter
_UpperCAmelCase : Tuple = adapter_kernel_size
_UpperCAmelCase : str = adapter_stride
_UpperCAmelCase : Optional[Any] = num_adapter_layers
_UpperCAmelCase : Any = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCAmelCase : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCAmelCase : Union[str, Any] = list(lowerCAmelCase__ )
_UpperCAmelCase : str = list(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = list(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = xvector_output_dim
@property
def _lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 ) | 494 | 0 |
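# The property above multiplies the conv strides into the feature extractor's
# total downsampling factor. For the default strides this is 5*2**6 = 320, i.e.
# one hidden frame per 320 input samples (20 ms of audio at 16 kHz):
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, conv_stride, 1) == 320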
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : Optional[Any] = {'''vocab_file''': '''spiece.model'''}
UpperCamelCase__ : Tuple = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
UpperCamelCase__ : Any = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : Dict = 1
UpperCamelCase__ : Union[str, Any] = 2
UpperCamelCase__ : Optional[int] = 3
UpperCamelCase__ : Dict = 4
class lowerCAmelCase_ ( lowercase__ ):
__a : str = VOCAB_FILES_NAMES
__a : Tuple = PRETRAINED_VOCAB_FILES_MAP
__a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : str = "left"
def __init__( self ,snake_case__ ,snake_case__=False ,snake_case__=True ,snake_case__=False ,snake_case__="<s>" ,snake_case__="</s>" ,snake_case__="<unk>" ,snake_case__="<sep>" ,snake_case__="<pad>" ,snake_case__="<cls>" ,snake_case__="<mask>" ,snake_case__=["<eop>", "<eod>"] ,snake_case__ = None ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(UpperCAmelCase__ ,lstrip=UpperCAmelCase__ ,rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ) else mask_token
SCREAMING_SNAKE_CASE_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase__ ,remove_space=UpperCAmelCase__ ,keep_accents=UpperCAmelCase__ ,bos_token=UpperCAmelCase__ ,eos_token=UpperCAmelCase__ ,unk_token=UpperCAmelCase__ ,sep_token=UpperCAmelCase__ ,pad_token=UpperCAmelCase__ ,cls_token=UpperCAmelCase__ ,mask_token=UpperCAmelCase__ ,additional_special_tokens=UpperCAmelCase__ ,sp_model_kwargs=self.sp_model_kwargs ,**UpperCAmelCase__ ,)
SCREAMING_SNAKE_CASE_ : Optional[Any] = 3
SCREAMING_SNAKE_CASE_ : int = do_lower_case
SCREAMING_SNAKE_CASE_ : Tuple = remove_space
SCREAMING_SNAKE_CASE_ : List[Any] = keep_accents
SCREAMING_SNAKE_CASE_ : Tuple = vocab_file
SCREAMING_SNAKE_CASE_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase__ )
@property
def snake_case ( self ):
return len(self.sp_model )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
SCREAMING_SNAKE_CASE_ : int = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : List[Any] = None
return state
def __setstate__( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE_ : Dict = {}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case ( self ,snake_case__ ):
if self.remove_space:
SCREAMING_SNAKE_CASE_ : str = ''' '''.join(inputs.strip().split() )
else:
SCREAMING_SNAKE_CASE_ : int = inputs
SCREAMING_SNAKE_CASE_ : List[Any] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
SCREAMING_SNAKE_CASE_ : Any = unicodedata.normalize('NFKD' ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = ''''''.join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] )
if self.do_lower_case:
SCREAMING_SNAKE_CASE_ : int = outputs.lower()
return outputs
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = self.preprocess_text(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = self.sp_model.encode(UpperCAmelCase__ ,out_type=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = []
for piece in pieces:
if len(UpperCAmelCase__ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
SCREAMING_SNAKE_CASE_ : List[Any] = cur_pieces[1:]
else:
SCREAMING_SNAKE_CASE_ : List[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase__ )
else:
new_pieces.append(UpperCAmelCase__ )
return new_pieces
def snake_case ( self ,snake_case__ ):
return self.sp_model.PieceToId(UpperCAmelCase__ )
def snake_case ( self ,snake_case__ ):
return self.sp_model.IdToPiece(UpperCAmelCase__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = ''''''.join(UpperCAmelCase__ ).replace(UpperCAmelCase__ ,' ' ).strip()
return out_string
def snake_case ( self ,snake_case__ ,snake_case__ = False ,snake_case__ = None ,snake_case__ = True ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('use_source_tokenizer' ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = self.convert_ids_to_tokens(UpperCAmelCase__ ,skip_special_tokens=UpperCAmelCase__ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
SCREAMING_SNAKE_CASE_ : Dict = []
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = []
sub_texts.append(UpperCAmelCase__ )
else:
current_sub_text.append(UpperCAmelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
SCREAMING_SNAKE_CASE_ : Any = ''''''.join(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
SCREAMING_SNAKE_CASE_ : str = self.clean_up_tokenization(UpperCAmelCase__ )
return clean_text
else:
return text
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case ( self ,snake_case__ ,snake_case__ = None ,snake_case__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ ,token_ids_a=UpperCAmelCase__ ,already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1]
return ([0] * len(UpperCAmelCase__ )) + [1, 1]
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
UpperCAmelCase__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ ,'wb' ) as fi:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
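# A standalone sketch of the accent-stripping normalization performed in the
# preprocess_text method above: NFKD-decompose, then drop combining marks.
import unicodedata

def strip_accents(text: str) -> str:
    decomposed = unicodedata.normalize("NFKD", text)
    return "".join(c for c in decomposed if not unicodedata.combining(c))

assert strip_accents("Héllò") == "Hello"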
| 700 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : Dict = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'https://pypi.org/pypi/diffusers/json'
SCREAMING_SNAKE_CASE_ : Optional[int] = json.loads(request.urlopen(lowerCamelCase_ ).read() )['releases'].keys()
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : version.Version(lowerCamelCase_ ) )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(lowerCamelCase_ ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] ) -> Any:
"""simple docstring"""
init_hf_modules()
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = f.read()
# Imports of the form `import .xxx`
SCREAMING_SNAKE_CASE_ : Tuple = re.findall(r'^\s*import\s+\.(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase_ ) )
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [module_file]
SCREAMING_SNAKE_CASE_ : Tuple = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE_ : int = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ).parent
SCREAMING_SNAKE_CASE_ : int = [str(module_path / m ) for m in new_imports]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE_ : Any = [F'{f}.py' for f in new_import_files]
SCREAMING_SNAKE_CASE_ : Optional[int] = len(lowerCamelCase_ ) == 0
all_relative_imports.extend(lowerCamelCase_ )
return all_relative_imports
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] = f.read()
# Imports of the form `import xxx`
SCREAMING_SNAKE_CASE_ : List[str] = re.findall(r'^\s*import\s+(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(r'^\s*from\s+(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
SCREAMING_SNAKE_CASE_ : List[str] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(set(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : List[str] = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase_ )
except ImportError:
missing_packages.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
F'{", ".join(lowerCamelCase_ )}. Run `pip install {" ".join(lowerCamelCase_ )}`' )
return get_relative_imports(lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = module_path.replace(os.path.sep , '.' )
SCREAMING_SNAKE_CASE_ : Any = importlib.import_module(lowerCamelCase_ )
if class_name is None:
return find_pipeline_class(lowerCamelCase_ )
return getattr(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
from ..pipelines import DiffusionPipeline
SCREAMING_SNAKE_CASE_ : List[Any] = dict(inspect.getmembers(lowerCamelCase_ , inspect.isclass ) )
SCREAMING_SNAKE_CASE_ : List[str] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCamelCase_ )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
SCREAMING_SNAKE_CASE_ : Any = cls
return pipeline_class
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = module_file_or_url
SCREAMING_SNAKE_CASE_ : Dict = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
SCREAMING_SNAKE_CASE_ : List[str] = get_diffusers_versions()
# cut ".dev0"
SCREAMING_SNAKE_CASE_ : Dict = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
SCREAMING_SNAKE_CASE_ : List[Any] = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
SCREAMING_SNAKE_CASE_ : int = F'v{revision}'
elif revision == "main":
SCREAMING_SNAKE_CASE_ : List[Any] = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
SCREAMING_SNAKE_CASE_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase_ , pipeline=lowerCamelCase_ )
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = cached_download(
lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Tuple = 'git'
SCREAMING_SNAKE_CASE_ : Dict = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE_ : List[str] = hf_hub_download(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
SCREAMING_SNAKE_CASE_ : Dict = check_imports(lowerCamelCase_ )
# Now we move the module inside our cached dynamic modules.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(lowerCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F'{module_needed}.py'
shutil.copy(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Tuple = use_auth_token
elif use_auth_token is True:
SCREAMING_SNAKE_CASE_ : int = HfFolder.get_token()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = model_info(lowerCamelCase_ , revision=lowerCamelCase_ , token=lowerCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
SCREAMING_SNAKE_CASE_ : Any = submodule_path / commit_hash
SCREAMING_SNAKE_CASE_ : List[Any] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase_ , F'{module_needed}.py' , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return os.path.join(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , **lowerCamelCase_ : Dict , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_cached_module_file(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return get_class_in_module(lowerCamelCase_ , final_module.replace('.py' , '' ) )
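# A compact sketch of the relative-import scan that get_relative_imports
# performs above (the two regexes only; recursion over discovered files is
# omitted):
import re

source = "import .utils\nfrom .pipeline_base import Foo\n"
relative = re.findall(r"^\s*import\s+\.(\S+)\s*$", source, flags=re.MULTILINE)
relative += re.findall(r"^\s*from\s+\.(\S+)\s+import", source, flags=re.MULTILINE)
assert sorted(set(relative)) == ["pipeline_base", "utils"]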
| 685 | 0 |
import math
def UpperCAmelCase__( __UpperCAmelCase : int ):
__snake_case : Optional[int] = 0
__snake_case : List[str] = 0
while num > 0:
__snake_case : List[str] = num % 8
__snake_case : Any = octal + (remainder * math.floor(math.pow(10 , counter ) ))
counter += 1
__snake_case : Tuple = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return F"""0o{int(__UpperCAmelCase )}"""
def UpperCAmelCase__( ):
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(65 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(2_16 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
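# A cleaner, self-contained sketch of the conversion above (peel off num % 8
# as the next octal digit); Python's built-in oct() gives the same result:
def decimal_to_octal(num: int) -> str:
    octal, place = 0, 1
    while num > 0:
        octal += (num % 8) * place
        num //= 8
        place *= 10
    return f"0o{octal}"

assert decimal_to_octal(65) == "0o101" == oct(65)
assert decimal_to_octal(216) == "0o330" == oct(216)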
| 576 | import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = (DDIMParallelScheduler,)
__UpperCAmelCase = (("eta", 0.0), ("num_inference_steps", 5_0))
def lowercase_ ( self , **_UpperCAmelCase ):
__snake_case : List[str] = {
'num_train_timesteps': 1_000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**_UpperCAmelCase )
return config
def lowercase_ ( self , **_UpperCAmelCase ):
__snake_case : int = self.scheduler_classes[0]
__snake_case : int = self.get_scheduler_config(**_UpperCAmelCase )
__snake_case : Any = scheduler_class(**_UpperCAmelCase )
__snake_case , __snake_case : List[str] = 10, 0.0
__snake_case : str = self.dummy_model()
__snake_case : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for t in scheduler.timesteps:
__snake_case : Tuple = model(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : List[str] = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
return sample
def lowercase_ ( self ):
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def lowercase_ ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_UpperCAmelCase )
__snake_case : str = self.scheduler_classes[0]
__snake_case : Union[str, Any] = self.get_scheduler_config(steps_offset=1 )
__snake_case : Optional[Any] = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def lowercase_ ( self ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def lowercase_ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def lowercase_ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def lowercase_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def lowercase_ ( self ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_UpperCAmelCase )
def lowercase_ ( self ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_UpperCAmelCase )
def lowercase_ ( self ):
self.check_over_configs(thresholding=_UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCAmelCase , prediction_type=_UpperCAmelCase , sample_max_value=_UpperCAmelCase , )
def lowercase_ ( self ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=_UpperCAmelCase )
def lowercase_ ( self ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase )
def lowercase_ ( self ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_UpperCAmelCase , eta=_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[Any] = self.scheduler_classes[0]
__snake_case : List[str] = self.get_scheduler_config()
__snake_case : Optional[int] = scheduler_class(**_UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def lowercase_ ( self ):
__snake_case : Dict = self.scheduler_classes[0]
__snake_case : Any = self.get_scheduler_config()
__snake_case : Any = scheduler_class(**_UpperCAmelCase )
__snake_case , __snake_case : List[str] = 10, 0.0
scheduler.set_timesteps(_UpperCAmelCase )
__snake_case : List[Any] = self.dummy_model()
__snake_case : Any = self.dummy_sample_deter
__snake_case : Union[str, Any] = self.dummy_sample_deter + 0.1
__snake_case : Dict = self.dummy_sample_deter - 0.1
__snake_case : Optional[int] = samplea.shape[0]
__snake_case : str = torch.stack([samplea, samplea, samplea] , dim=0 )
__snake_case : List[str] = torch.arange(_UpperCAmelCase )[0:3, None].repeat(1 , _UpperCAmelCase )
__snake_case : str = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__snake_case : Dict = scheduler.batch_step_no_noise(_UpperCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _UpperCAmelCase )
__snake_case : List[str] = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : List[str] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
def lowercase_ ( self ):
__snake_case : Optional[int] = self.full_loop()
__snake_case : str = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : Tuple = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.223967 ) < 1E-3
def lowercase_ ( self ):
__snake_case : Any = self.full_loop(prediction_type='v_prediction' )
__snake_case : Any = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : str = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 52.5302 ) < 1E-2
assert abs(result_mean.item() - 0.0684 ) < 1E-3
def lowercase_ ( self ):
# We specify different beta, so that the first alpha is 0.99
__snake_case : int = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 )
__snake_case : Dict = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : int = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.1951 ) < 1E-3
def lowercase_ ( self ):
# We specify different beta, so that the first alpha is 0.99
__snake_case : Optional[int] = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 )
__snake_case : Dict = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : List[str] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.1941 ) < 1E-3
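# The _get_variance values asserted above follow the DDIM posterior variance,
# sigma_t^2 = (1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev), where a is the
# cumulative alpha product. A sketch of the formula, assuming the linear beta
# schedule configured in this test (beta_start=0.0001, beta_end=0.02):
import torch

betas = torch.linspace(0.0001, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def ddim_variance(t: int, prev_t: int) -> torch.Tensor:
    a_t, a_prev = alphas_cumprod[t], alphas_cumprod[prev_t]
    return (1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev)

assert torch.isclose(ddim_variance(420, 400), torch.tensor(0.14771), atol=1e-3)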
| 576 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = "ssube/stable-diffusion-x4-upscaler-onnx"
def _lowerCAmelCase( self , __lowerCAmelCase=0 ) -> Any:
lowercase__ : Optional[int] = floats_tensor((1, 3, 128, 128) , rng=random.Random(__lowerCAmelCase ) )
lowercase__ : List[Any] = torch.manual_seed(__lowerCAmelCase )
lowercase__ : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase__ : Any = self.get_dummy_inputs()
lowercase__ : List[str] = pipe(**__lowerCAmelCase ).images
lowercase__ : Dict = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
lowercase__ : int = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def _lowerCAmelCase( self ) -> Dict:
lowercase__ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowercase__ : Dict = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase__ : int = self.get_dummy_inputs()
lowercase__ : int = pipe(**__lowerCAmelCase ).images
lowercase__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ : str = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowercase__ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase__ : Dict = self.get_dummy_inputs()
lowercase__ : Tuple = pipe(**__lowerCAmelCase ).images
lowercase__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ : Dict = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowerCAmelCase( self ) -> Any:
lowercase__ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowercase__ : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase__ : Optional[Any] = self.get_dummy_inputs()
lowercase__ : Union[str, Any] = pipe(**__lowerCAmelCase ).images
lowercase__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ : Optional[int] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowerCAmelCase( self ) -> str:
lowercase__ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowercase__ : List[str] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase__ : int = self.get_dummy_inputs()
lowercase__ : Dict = pipe(**__lowerCAmelCase ).images
lowercase__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ : str = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCAmelCase( self ) -> Dict:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowerCAmelCase( self ) -> str:
lowercase__ : Optional[Any] = ort.SessionOptions()
lowercase__ : Union[str, Any] = False
return options
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowercase__ : Optional[Any] = init_image.resize((128, 128) )
# using the PNDM scheduler by default
lowercase__ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase__ : Any = '''A fantasy landscape, trending on artstation'''
lowercase__ : Any = torch.manual_seed(0 )
lowercase__ : int = pipe(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__lowerCAmelCase , output_type='''np''' , )
lowercase__ : Any = output.images
lowercase__ : Dict = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
lowercase__ : str = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowerCAmelCase( self ) -> Any:
lowercase__ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowercase__ : Optional[Any] = init_image.resize((128, 128) )
lowercase__ : str = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' )
lowercase__ : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=__lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase__ : str = '''A fantasy landscape, trending on artstation'''
lowercase__ : Dict = torch.manual_seed(0 )
lowercase__ : Any = pipe(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__lowerCAmelCase , output_type='''np''' , )
lowercase__ : Dict = output.images
lowercase__ : Optional[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
lowercase__ : int = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 428 | '''simple docstring'''
__a: Dict = """Tobias Carryer"""
from time import time
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=int(time() ) ) -> List[Any]: # noqa: B008
lowercase__ : Dict = multiplier
lowercase__ : Dict = increment
lowercase__ : str = modulo
lowercase__ : Optional[Any] = seed
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : Optional[Any] = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__a: Dict = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31)
while True:
print(lcg.next_number())
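# A cleaner sketch of the same generator; the constants used above
# (a=1664525, c=1013904223, m=2<<31 == 2**32) are the well-known
# Numerical Recipes LCG parameters:
from time import time

class LCG:
    def __init__(self, multiplier, increment, modulo, seed=None):
        self.multiplier, self.increment, self.modulo = multiplier, increment, modulo
        self.seed = int(time()) if seed is None else seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed

lcg = LCG(1664525, 1013904223, 2**32, seed=42)
print([lcg.next_number() for _ in range(3)])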
| 428 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Dict = logging.get_logger(__name__)
snake_case : str = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1) | 545 |
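# [Editor's sketch] Sanity check on the property above: the default strides
# (5, 2, 2, 2, 2, 2, 2) multiply out to 5 * 2**6 = 320, i.e. one output frame
# per 320 input samples.
def _check_inputs_to_logits_ratio() -> None:
    config = UniSpeechConfig()
    assert config.inputs_to_logits_ratio == 320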
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
snake_case : Optional[Any] = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
snake_case : List[str] = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=1_2, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset) | 545 | 1 |
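# [Editor's sketch] Programmatic use of the checker defined above; the .pb
# path is a hypothetical placeholder, and opset 12 mirrors the CLI default.
def _check_example_model() -> None:
    onnx_compliancy("path/to/saved_model.pb", strict=False, opset=12)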
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(f'Done. Video saved to disk as {file_name}.')
| 239 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 239 | 1 |
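# [Editor's sketch] Inspecting the dynamic axes the ONNX export would use for
# the default task; batch and sequence dimensions stay dynamic. Assumes the
# OnnxConfig subclass above can be built directly from a model config, as
# OnnxConfig subclasses usually can.
def _show_onnx_inputs() -> None:
    onnx_config = Data2VecTextOnnxConfig(Data2VecTextConfig())
    print(onnx_config.inputs)
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
    #              ('attention_mask', {0: 'batch', 1: 'sequence'})])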
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(f'{solution() = }')
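# [Editor's sketch] An explicit-loop equivalent of solution() for readers who
# find the nested reduce dense; same constant N, same 13-digit window.
def solution_explicit(n: str = N, window: int = 13) -> int:
    best = 0
    for i in range(len(n) - window + 1):
        product = 1
        for digit in n[i : i + window]:
            product *= int(digit)
        best = max(best, product)
    return best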
| 2 | import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """simple docstring"""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    """simple docstring"""

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
| 305 | 0 |
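# [Editor's sketch] Hypothetical wiring of the (deprecated) GlueDataset above;
# the checkpoint, task name and data directory are placeholders.
def _build_dev_dataset():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
    return GlueDataset(data_args, tokenizer=tokenizer, mode="dev")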
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
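# [Editor's sketch] The mapping the property above yields when a dataset's
# columns are named differently; the column names here are illustrative.
def _check_column_mapping() -> None:
    template = Summarization(text_column="article", summary_column="highlights")
    assert template.column_mapping == {"article": "text", "highlights": "summary"}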
| 711 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_21},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.99_67},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_93},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.99_09},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.98_79},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.98_34},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.97_16},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.96_12},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.95_99},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.95_52},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.95_32},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.95_16},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.94_99},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.94_83},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.94_64},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.94_08},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.93_35},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.93_26},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.92_62},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.89_99},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.89_86},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.89_84},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.88_73},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.88_71}
], )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.02_10},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
], )
| 508 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''squeezebert/squeezebert-uncased''': 5_12,
'''squeezebert/squeezebert-mnli''': 5_12,
'''squeezebert/squeezebert-mnli-headless''': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files) | 39 |
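# [Editor's sketch] The special-token layout produced by the methods above:
# [CLS] A [SEP] for one sequence and [CLS] A [SEP] B [SEP] for a pair.
# Loading the real checkpoint assumes network access.
def _check_special_token_layout() -> None:
    tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
    pair = tok.build_inputs_with_special_tokens([5, 6], [7, 8])
    assert pair == [tok.cls_token_id, 5, 6, tok.sep_token_id, 7, 8, tok.sep_token_id]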
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 637 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 295 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 295 | 1 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 84 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 328 | 0 |
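# [Editor's sketch] One hand-rolled gradient step on a toy two-point dataset,
# mirroring the update rule inside logistic_reg above; the data values are
# illustrative.
def _toy_gradient_step():
    x_toy = np.array([[1.0, 2.0], [2.0, 1.0]])
    y_toy = np.array([0, 1])
    theta_toy = np.zeros(2)
    h = sigmoid_function(np.dot(x_toy, theta_toy))  # both predictions are 0.5 at theta = 0
    gradient = np.dot(x_toy.T, h - y_toy) / y_toy.size
    return theta_toy - 0.1 * gradient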
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F'{solution() = }')
| 186 |
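# [Editor's sketch] Regression check against Project Euler's published answer
# for problem 46 (Goldbach's other conjecture).
def _check_problem_46() -> None:
    assert solution() == 5777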
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 186 | 1 |
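# [Editor's sketch] Typical use through the pipeline factory. The CLAP
# checkpoint id matches the one used in the transformers documentation;
# "sample.wav" is a placeholder path.
def _classify_example() -> None:
    from transformers import pipeline

    classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    print(classifier("sample.wav", candidate_labels=["dog barking", "vacuum cleaner"]))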
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 469 |
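# [Editor's sketch] SpeechT5 appends only </s>; there is no [CLS]-style
# prefix. Loading the real checkpoint assumes network access.
def _check_speecht5_layout() -> None:
    tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
    assert tok.build_inputs_with_special_tokens([10, 11]) == [10, 11, tok.eos_token_id]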
def text_justification(word: str, max_width: int) -> list:
    """simple docstring"""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 469 | 1 |
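# [Editor's sketch] Expected behavior of text_justification, matching the
# doctest shipped with the upstream TheAlgorithms version of this function.
def _show_justified() -> None:
    print(text_justification("This is an example of text justification.", 16))
    # ['This    is    an', 'example  of text', 'justification.  ']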
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : Any = config_class
UpperCAmelCase : Union[str, Any] = has_text_modality
UpperCAmelCase : int = kwargs
UpperCAmelCase : Dict = common_properties
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.config_class(**self.inputs_dict )
UpperCAmelCase : List[str] = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , msg=F"`{prop}` does not exist" )
# Test that config has the common properties as setter
for idx, name in enumerate(_SCREAMING_SNAKE_CASE ):
try:
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , msg=F"`{name} value {idx} expected, but was {getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(_SCREAMING_SNAKE_CASE ):
try:
UpperCAmelCase : Union[str, Any] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , msg=F"`{name} value {idx} expected, but was {getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Any = self.config_class(**self.inputs_dict )
UpperCAmelCase : str = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Tuple = os.path.join(_SCREAMING_SNAKE_CASE , """config.json""" )
config_first.to_json_file(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = self.config_class.from_json_file(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = self.config_class.from_pretrained(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Dict = self.config_class(**self.inputs_dict )
UpperCAmelCase : Optional[int] = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Tuple = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
config_first.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = self.config_class.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
        config = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) , 5 )
        self.parent.assertEqual(len(config.labelaid ) , 5 )
        config.num_labels = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
if self.config_class.is_composition:
return
        config = self.config_class()
        self.parent.assertIsNotNone(config )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
        kwargs = copy.deepcopy(config_common_kwargs )
        config = self.config_class(**kwargs )
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.floataa:
                        wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
            elif getattr(config , key ) != value:
                wrong_values.append((key, getattr(config , key ), value) )
        if len(wrong_values ) > 0:
            errors = """\n""".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(F"The following keys were not properly set in the config:\n{errors}" )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 359 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : Any = XLMTokenizer
__lowerCAmelCase : List[Any] = False
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , """w""" ) as fp:
            fp.write("""\n""".join(merges ) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[int] = """lower newer"""
UpperCAmelCase : Dict = """lower newer"""
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
        tokenizer = XLMTokenizer(self.vocab_file , self.merges_file )
        text = """lower"""
        bpe_tokens = ["""low""", """er</w>"""]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ["""<unk>"""]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
        tokenizer = XLMTokenizer.from_pretrained("""xlm-mlm-en-2048""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 359 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
    pipeline_class = KandinskyImgaImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
    required_optional_params = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
@property
def __a ( self ):
return 3_2
@property
def __a ( self ):
return 3_2
@property
def __a ( self ):
return self.time_input_dim
@property
def __a ( self ):
return self.time_input_dim * 4
@property
def __a ( self ):
return 1_0_0
@property
def __a ( self ):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def __a ( self ):
torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
return text_encoder
@property
def __a ( self ):
torch.manual_seed(0 )
        model_kwargs = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNetaDConditionModel(**model_kwargs )
return model
@property
def __a ( self ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __a ( self ):
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def __a ( self ):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.0_00_85,
'beta_end': 0.0_12,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
        scheduler = DDIMScheduler(**scheduler_kwargs )
        components = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
    def __a ( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uinta(image ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
    def __a ( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy' )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
        pipe_prior.to(torch_device )
        pipeline = KandinskyImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(image , expected_image )
| 66 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def lowerCAmelCase__ ( split_dict: SplitDict ) -> None:
    '''simple docstring'''
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info" , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name="my_dataset" )] )
def lowerCAmelCase__ ( split_info: SplitInfo ) -> None:
    '''simple docstring'''
    split_dict_asdict = asdict(SplitDict({"train": split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 514 | 0 |
def _a ( arr , required_sum ) -> bool:
    """simple docstring"""
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
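    # Illustrative sanity checks (not from the original file): the subset {4, 5}
    # of [3, 34, 4, 12, 5, 2] sums to 9, but no subset of it sums to 30.
    assert _a([3, 34, 4, 12, 5, 2] , 9 ) is True
    assert _a([3, 34, 4, 12, 5, 2] , 30 ) is False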
| 567 |
def _a ( input_num ) -> int:
    """simple docstring"""
    if not isinstance(input_num , int ):
        raise ValueError('Input must be an integer' )
    if input_num <= 0:
        raise ValueError('Input must be positive' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
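    # Illustrative sanity check (not from the original file): the proper
    # divisors of 6 are 1, 2 and 3, which sum to 6.
    assert _a(6 ) == 6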
| 567 | 1 |
__version__ = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 486 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = '▁'
UpperCamelCase__ = {'vocab_file': 'spiece.model'}
UpperCamelCase__ = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
UpperCamelCase__ = {
'google/reformer-crime-and-punishment': 5_2_4_2_8_8,
}
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : int = VOCAB_FILES_NAMES
__UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : str = ['input_ids', 'attention_mask']
    def __init__(self , vocab_file , eos_token="</s>" , unk_token="<unk>" , additional_special_tokens=[] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
def lowercase_ (self : Tuple ) -> Dict:
"""simple docstring"""
return self.sp_model.get_piece_size()
def lowercase_ (self : Dict ) -> Dict[str, int]:
"""simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def __getstate__(self ) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self , d ) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def lowercase_ (self , text ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def lowercase_ (self , token ) -> int:
        """simple docstring"""
        return self.sp_model.piece_to_id(token )
    def lowercase_ (self , index ) -> str:
        """simple docstring"""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        return token
    def lowercase_ (self , tokens ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def lowercase_ (self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
| 486 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
_snake_case = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
_snake_case = {
'''RUCAIBox/mvp''': 10_24,
}
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Dict = VOCAB_FILES_NAMES
__A : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Optional[int] = ["input_ids", "attention_mask"]
__A : str = MvpTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
def _snake_case ( self ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
    def _snake_case ( self , value ):
        """simple docstring"""
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _snake_case ( self , *args , **kwargs ):
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words" , False )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args , **kwargs )
    def _snake_case ( self , *args , **kwargs ):
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words" , False )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
        return super()._encode_plus(*args , **kwargs )
    def _snake_case ( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _snake_case ( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def _snake_case ( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
| 708 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation("gelu" )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
def _snake_case ( self ):
"""simple docstring"""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation("gelu" )
        geluaa = get_activation("gelu_10" )
        y_gelu = torch_builtin(x )
        y_gelu_aa = geluaa(x )
        clipped_mask = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_aa ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def _snake_case ( self ):
"""simple docstring"""
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
        with self.assertRaises(KeyError ):
            get_activation("bogus" )
        with self.assertRaises(KeyError ):
            get_activation(None )
def _snake_case ( self ):
"""simple docstring"""
        act1 = get_activation("gelu" )
        act1.a = 1
        act2 = get_activation("gelu" )
        self.assertEqual(act1.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = act2.a
| 231 | 0 |
def multiplicative_persistence(num: int ) -> int:
    if not isinstance(num , int ):
        raise ValueError("""multiplicative_persistence() only accepts integral values""" )
    if num < 0:
        raise ValueError("""multiplicative_persistence() does not accept negative values""" )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 1
        for i in range(0 , len(numbers ) ):
            total *= numbers[i]
        num_string = str(total )
        steps += 1
    return steps
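# Worked example (illustrative): 39 -> 3 * 9 = 27 -> 2 * 7 = 14 -> 1 * 4 = 4,
# so multiplicative_persistence(39) returns 3.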
def additive_persistence(num: int ) -> int:
    if not isinstance(num , int ):
        raise ValueError("""additive_persistence() only accepts integral values""" )
    if num < 0:
        raise ValueError("""additive_persistence() does not accept negative values""" )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 0
        for i in range(0 , len(numbers ) ):
            total += numbers[i]
        num_string = str(total )
        steps += 1
    return steps
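# Worked example (illustrative): 199 -> 1 + 9 + 9 = 19 -> 1 + 9 = 10 -> 1 + 0 = 1,
# so additive_persistence(199) returns 3.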
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 108 |
'''simple docstring'''
import os
from pathlib import Path
def A__ ( ):
from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
root / filename
for filename in [
'vision.cpp',
os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ),
os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ),
]
]
    load(
        'MultiScaleDeformableAttention' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['-DWITH_CUDA=1'] , extra_cuda_cflags=[
'-DCUDA_HAS_FP16=1',
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 195 | 0 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs , ks ):
    qts = tuple((re.compile(x + """$""" ) for x in qs) )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules(rules ):
    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val
    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("""mp""" , None )),
        (("transformer", "wte", "embedding"), P("""mp""" , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , """mp""" )),
        (("attention", "out_proj", "kernel"), P("""mp""" , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , """mp""" )),
        (("mlp", "c_fc", "bias"), P("""mp""" )),
        (("mlp", "c_proj", "kernel"), P("""mp""" , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict ):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
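# Illustrative check (not from the original file): the key
# ("transformer", "h", "0", "attention", "q_proj", "kernel") matches the
# ("attention", "(q_proj|k_proj|v_proj)", "kernel") rule above, so
# set_partitions would shard that weight as P(None, "mp").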
| 721 | """simple docstring"""
def one_pence():
    return 1
def two_pence(x: int ):
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence(x: int ):
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence(x: int ):
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )
def twenty_pence(x: int ):
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )
def fifty_pence(x: int ):
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )
def one_pound(x: int ):
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(x )
def two_pound(x: int ):
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(x )
def solution(x: int = 200 ):
    return two_pound(x )
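# Worked example (illustrative): with only 1p and 2p coins there are three ways
# to make 4p (1+1+1+1, 1+1+2, 2+2), i.e. two_pence(4) == 3; solution() counts
# the ways to make 200 pence from all eight UK coin denominations.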
if __name__ == "__main__":
print(solution(int(input().strip())))
| 173 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __snake_case :
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=30 , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=10 , UpperCamelCase_=0.0_2 , UpperCamelCase_=None , UpperCamelCase_=2 , ) -> List[Any]:
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = image_size
snake_case__ = patch_size
snake_case__ = num_channels
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = type_sequence_label_size
snake_case__ = initializer_range
snake_case__ = scope
snake_case__ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case__ = (image_size // patch_size) ** 2
snake_case__ = num_patches + 1
def _snake_case ( self ) -> Tuple:
snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ = self.get_config()
return config, pixel_values, labels
def _snake_case ( self ) -> Union[str, Any]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Any:
snake_case__ = ViTModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
snake_case__ = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
snake_case__ = ViTForMaskedImageModeling(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
snake_case__ = model(UpperCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case__ = 1
snake_case__ = ViTForMaskedImageModeling(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
snake_case__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ = model(UpperCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
snake_case__ = self.type_sequence_label_size
snake_case__ = ViTForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
snake_case__ = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ = 1
snake_case__ = ViTForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
snake_case__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class __snake_case ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__lowerCAmelCase = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__lowerCAmelCase = (
{'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def _snake_case ( self ) -> List[str]:
snake_case__ = ViTModelTester(self )
snake_case__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def _snake_case ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _snake_case ( self ) -> Optional[int]:
pass
def _snake_case ( self ) -> Union[str, Any]:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def _snake_case ( self ) -> Optional[Any]:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(UpperCamelCase_ )
snake_case__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def _snake_case ( self ) -> str:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def _snake_case ( self ) -> List[Any]:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase_ )
def _snake_case ( self ) -> Optional[int]:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
@slow
def _snake_case ( self ) -> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = ViTModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def __lowerCamelCase ( ) ->Tuple:
snake_case__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
def _snake_case ( self ) -> Tuple:
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def _snake_case ( self ) -> Union[str, Any]:
snake_case__ = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(UpperCamelCase_ )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=UpperCamelCase_ , return_tensors='pt' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
snake_case__ = model(**UpperCamelCase_ )
# verify the logits
snake_case__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
snake_case__ = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
def _snake_case ( self ) -> List[str]:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
snake_case__ = ViTModel.from_pretrained('facebook/dino-vits8' ).to(UpperCamelCase_ )
snake_case__ = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
snake_case__ = prepare_img()
snake_case__ = image_processor(images=UpperCamelCase_ , return_tensors='pt' )
snake_case__ = inputs.pixel_values.to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
snake_case__ = model(UpperCamelCase_ , interpolate_pos_encoding=UpperCamelCase_ )
# verify the logits
snake_case__ = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase_ )
snake_case__ = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _snake_case ( self ) -> int:
snake_case__ = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=UpperCamelCase_ , return_tensors='pt' )
snake_case__ = inputs.pixel_values.to(UpperCamelCase_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
snake_case__ = model(UpperCamelCase_ )
| 368 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : str = logging.get_logger(__name__)
a__ : Any = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class __snake_case ( __magic_name__ ):
__lowerCAmelCase = '''xlm'''
__lowerCAmelCase = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
def __init__( self , UpperCamelCase_=3_0145 , UpperCamelCase_=2048 , UpperCamelCase_=12 , UpperCamelCase_=16 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=1 , UpperCamelCase_=True , UpperCamelCase_=512 , UpperCamelCase_=2048**-0.5 , UpperCamelCase_=1E-1_2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=0 , UpperCamelCase_=1 , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=5 , UpperCamelCase_=True , UpperCamelCase_="first" , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=0.1 , UpperCamelCase_=5 , UpperCamelCase_=5 , UpperCamelCase_=0 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=0 , **UpperCamelCase_ , ) -> List[str]:
snake_case__ = vocab_size
snake_case__ = emb_dim
snake_case__ = n_layers
snake_case__ = n_heads
snake_case__ = dropout
snake_case__ = attention_dropout
snake_case__ = gelu_activation
snake_case__ = sinusoidal_embeddings
snake_case__ = causal
snake_case__ = asm
snake_case__ = n_langs
snake_case__ = use_lang_emb
snake_case__ = layer_norm_eps
snake_case__ = bos_index
snake_case__ = eos_index
snake_case__ = pad_index
snake_case__ = unk_index
snake_case__ = mask_index
snake_case__ = is_encoder
snake_case__ = max_position_embeddings
snake_case__ = embed_init_std
snake_case__ = init_std
snake_case__ = summary_type
snake_case__ = summary_use_proj
snake_case__ = summary_activation
snake_case__ = summary_proj_to_labels
snake_case__ = summary_first_dropout
snake_case__ = start_n_top
snake_case__ = end_n_top
snake_case__ = mask_token_id
snake_case__ = lang_id
if "n_words" in kwargs:
snake_case__ = kwargs['n_words']
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
class __snake_case ( __magic_name__ ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 368 | 1 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace ):
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
lowercase : Dict = '''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@staticmethod
    def _lowerCAmelCase ( parser ) -> None:
        train_parser = parser.add_parser(
            "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
        train_parser.add_argument("--model_type" , type=str , required=True , help="Model's type." )
        train_parser.add_argument(
            "--tf_checkpoint" , type=str , required=True , help="TensorFlow checkpoint path or folder." )
        train_parser.add_argument(
            "--pytorch_dump_output" , type=str , required=True , help="Path to the PyTorch saved model output." )
        train_parser.add_argument("--config" , type=str , default="" , help="Configuration file path or folder." )
        train_parser.add_argument(
            "--finetuning_task_name" , type=str , default=None , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args , ) -> None:
        self._logger = logging.get_logger("transformers-cli/converting" )
        self._logger.info(f'''Loading model {model_type}''' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
def _lowerCAmelCase ( self ) -> List[str]:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE )
if "ckpt" in self._tf_checkpoint.lower():
snake_case_ : str = self._tf_checkpoint
snake_case_ : Tuple = ""
else:
snake_case_ : str = self._tf_checkpoint
snake_case_ : int = ""
convert_transfo_xl_checkpoint_to_pytorch(
_SCREAMING_SNAKE_CASE , self._config , self._pytorch_dump_output , _SCREAMING_SNAKE_CASE )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE )
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_SCREAMING_SNAKE_CASE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]" )
| 114 |
def factorial ( num : int ):
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact
def split_and_add ( number : int ):
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution ( num : int = 1_00 ):
    nfact = factorial(num )
    result = split_and_add(nfact )
    return result
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
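# Quick sanity values for the helpers above: factorial(5) == 120 and
# split_and_add(120) == 1 + 2 + 0 == 3; solution(100) computes the digit sum
# of 100!, which is the well-known Project Euler 20 result of 648.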
| 114 | 1 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url ( repo_id , path , revision = None ):
    """simple docstring"""
    if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type="dataset" , revision=revision )
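# Minimal usage sketch for the helper above (repo id and file path are made-up examples):
# url = hf_hub_url("user/my-dataset", "data/train.csv", revision="main")
# On old huggingface_hub versions the path is percent-encoded first, as handled above.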
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian ( string_aa ):
    """simple docstring"""
    if len(string_aa ) != 32:
        raise ValueError("Input must be of length 32" )
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex ( i ):
    """simple docstring"""
    if i < 0:
        raise ValueError("Input must be non-negative" )
    hex_rep = format(i , "08x" )[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
    return little_endian_hex
def preprocess ( message ):
    """simple docstring"""
    bit_string = b""
    for char in message:
        bit_string += format(char , "08b" ).encode("utf-8" )
    start_len = format(len(bit_string ) , "064b" ).encode("utf-8" )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string
def get_block_words ( bit_string ):
    """simple docstring"""
    if len(bit_string ) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512" )
    for pos in range(0 , len(bit_string ) , 512 ):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0 , 512 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words
def not_aa ( i ):
    """simple docstring"""
    if i < 0:
        raise ValueError("Input must be non-negative" )
    i_str = format(i , "032b" )
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_aa ( a , b ):
    """simple docstring"""
    return (a + b) % 2**32
def left_rotate_aa ( i , shift ):
    """simple docstring"""
    if i < 0:
        raise ValueError("Input must be non-negative" )
    if shift < 0:
        raise ValueError("Shift must be non-negative" )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me ( message ):
    """simple docstring"""
    bit_string = preprocess(message )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod() | 436 | 1 |
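# Cross-check sketch for the implementation above: the pure-Python digest should match
# the standard library for the same input (hashlib assumed available):
# import hashlib
# assert md5_me(b"abc") == hashlib.md5(b"abc").hexdigest().encode("utf-8")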
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve ( matrix : Matrix , vector : Matrix ) -> Matrix:
    '''simple docstring'''
    size : int = len(matrix )
    augmented : Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row : int
    rowa : int
    col : int
    cola : int
    pivot_row : int
    ratio : float
    # copy the coefficient matrix and the right-hand side into the augmented matrix
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate ( y_list : list[int] ):
    '''simple docstring'''
    size : int = len(y_list )
    matrix : Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector : Matrix = [[0] for _ in range(size )]
    coeffs : Matrix
    x_val : int
    y_val : int
    col : int
    for x_val, y_val in enumerate(y_list ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var : int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function ( variable : int ) -> int:
    '''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution ( func : Callable[[int], int] = question_function , order : int = 10 ):
    '''simple docstring'''
    data_points : list[int] = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials : list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret : int = 0
    poly : Callable[[int], int]
    x_val : int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
print(f'''{solution() = }''')
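# Worked check for interpolate() above: fitting the first three cubes with a quadratic
# reproduces the fitted points but diverges at the next one, which is exactly the
# "first incorrect term" that solution() accumulates:
#   f = interpolate([1, 8, 27])   # y-values of n**3 at n = 1, 2, 3
#   f(1), f(2), f(3), f(4)  ->  1, 8, 27, 58   (58 != 4**3 == 64)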
| 713 | """simple docstring"""
from __future__ import annotations
def shear_stress ( stress : float , tangential_force : float , area : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
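# Usage sketch for shear_stress above: leave exactly one of the three quantities
# at 0 and the function solves for it (values are illustrative):
# shear_stress(stress=0, tangential_force=25, area=0.5)  ->  ('stress', 50.0)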
| 645 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : Union[str, Any] = logging.get_logger(__name__)
__a : int = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''vivit'''
    def __init__( self , image_size=224 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , qkv_bias=True , **kwargs , ) -> None:
        """simple docstring"""
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
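# Usage sketch for the config class above (field values are illustrative):
# cfg = VivitConfig(num_frames=16, hidden_size=512)
# assert cfg.num_frames == 16 and cfg.hidden_act == "gelu_fast"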
| 606 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_A = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
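# The module above uses the lazy-import pattern: heavy submodules are only imported
# when an attribute is first accessed. A minimal sketch of the same idea (simplified;
# not the actual transformers _LazyModule implementation):
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule on first access, then cache the attribute
        # on the module object so later lookups skip __getattr__ entirely.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                value = getattr(module, attr)
                setattr(self, attr, value)
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")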
| 158 | 0 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 706 |
from scipy.stats import spearmanr
import datasets
A__ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
A__ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
A__ = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
    def _info ( self :Union[str, Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) ,reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] ,)
    def _compute ( self ,predictions ,references ,return_pvalue=False ):
        results = spearmanr(predictions ,references )
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 219 | 0 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover ( graph : dict ) -> set[int]:
    """simple docstring"""
    queue : list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 71 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_SCREAMING_SNAKE_CASE : List[str] = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_SCREAMING_SNAKE_CASE : int = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_SCREAMING_SNAKE_CASE : Dict = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
    def _info ( self : Dict ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )
    def _compute ( self , predictions , references , return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 549 | 0 |
import base64
def base85_encode ( string : str ) -> bytes:
    '''simple docstring'''
    return base64.a85encode(string.encode("utf-8" ) )
def base85_decode ( a85encoded : bytes ) -> str:
    '''simple docstring'''
    return base64.a85decode(a85encoded ).decode("utf-8" )
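# Round-trip sketch for the helpers above (Ascii85 via Python's base64 module):
# assert base85_decode(base85_encode("Hello World!")) == "Hello World!"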
if __name__ == "__main__":
import doctest
doctest.testmod() | 704 | from __future__ import annotations
def print_distance ( distance : list[float] , src : int ) -> None:
    '''simple docstring'''
    print(f'''Vertex\tShortest Distance from vertex {src}''' )
    for i, d in enumerate(distance ):
        print(f'''{i}\t\t{d}''' )
def check_negative_cycle ( graph : list[dict[str, int]] , distance : list[float] , edge_count : int ) -> bool:
    '''simple docstring'''
    for j in range(edge_count ):
        u , v , w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford ( graph : list[dict[str, int]] , vertex_count : int , edge_count : int , src : int ) -> list[float]:
    '''simple docstring'''
    distance = [float("inf" )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u , v , w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
    if negative_cycle_exists:
        raise Exception("Negative cycle found" )
    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = int(input('Enter number of vertices: ').strip())
lowerCAmelCase__ = int(input('Enter number of edges: ').strip())
lowerCAmelCase__ = [{} for _ in range(E)]
for i in range(E):
print('Edge ', i + 1)
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = (
int(x)
for x in input('Enter source, destination, weight: ').strip().split(' ')
)
lowerCAmelCase__ = {'src': src, 'dst': dest, 'weight': weight}
lowerCAmelCase__ = int(input('\nEnter shortest path source:').strip())
lowerCAmelCase__ = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
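# Worked example for the functions above (a small hypothetical graph):
# graph = [{'src': 0, 'dst': 1, 'weight': 2}, {'src': 1, 'dst': 2, 'weight': -1}]
# bellman_ford(graph, vertex_count=3, edge_count=2, src=0)  ->  [0.0, 2.0, 1.0]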
| 576 | 0 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = GPTSwaTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(_lowerCamelCase , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''')
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : List[Any]) ->Optional[Any]:
'''simple docstring'''
A__ = '''This is a test'''
A__ = '''This is a test'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
A__ = '''<s>'''
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__) , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict:
'''simple docstring'''
A__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''j''')
self.assertEqual(len(UpperCAmelCase__) , 2_000)
def SCREAMING_SNAKE_CASE ( self : int) ->Dict:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 2_000)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
'''simple docstring'''
        tokenizer = GPTSwaTokenizer(_lowerCamelCase)
A__ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__) , [465, 287, 265, 631, 842])
A__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
# fmt: off
self.assertListEqual(
UpperCAmelCase__ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
A__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__)
self.assertListEqual(
UpperCAmelCase__ , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
A__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__)
# fmt: off
self.assertListEqual(
UpperCAmelCase__ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''])
# fmt: on
def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
'''simple docstring'''
        tokenizer = GPTSwaTokenizer(_lowerCamelCase)
A__ = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
A__ = [
[465, 287, 265, 631, 842],
[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assertListEqual(tokenizer.encode_fast(UpperCAmelCase__) , UpperCAmelCase__)
# Test that decode_fast returns the input text
for text, token_ids in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assertEqual(tokenizer.decode_fast(UpperCAmelCase__) , UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Any) ->int:
'''simple docstring'''
A__ = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
A__ = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=UpperCAmelCase__ , )
| 87 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCamelCase_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) ->Optional[Any]:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['''next_sentence_label'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32)
        return inputs_dict
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int=13 , UpperCAmelCase__ : str=7 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : List[str]=32 , UpperCAmelCase__ : Optional[int]=32 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : List[Any]=512 , UpperCAmelCase__ : Tuple=16 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Tuple=None , ) ->Any:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
A__ = embedding_size
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length])
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
A__ = ids_tensor([self.batch_size] , self.num_choices)
A__ = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any]) ->Any:
'''simple docstring'''
A__ = TFMobileBertModel(config=UpperCAmelCase__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = model(UpperCAmelCase__)
A__ = [input_ids, input_mask]
A__ = model(UpperCAmelCase__)
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple) ->Optional[Any]:
'''simple docstring'''
A__ = TFMobileBertForMaskedLM(config=UpperCAmelCase__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any]) ->int:
'''simple docstring'''
A__ = TFMobileBertForNextSentencePrediction(config=UpperCAmelCase__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int) ->List[Any]:
'''simple docstring'''
A__ = TFMobileBertForPreTraining(config=UpperCAmelCase__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple) ->Dict:
'''simple docstring'''
A__ = self.num_labels
A__ = TFMobileBertForSequenceClassification(config=UpperCAmelCase__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int) ->Dict:
'''simple docstring'''
A__ = self.num_choices
A__ = TFMobileBertForMultipleChoice(config=UpperCAmelCase__)
A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1))
A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1))
A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1))
A__ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int]) ->int:
'''simple docstring'''
A__ = self.num_labels
A__ = TFMobileBertForTokenClassification(config=UpperCAmelCase__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = TFMobileBertForQuestionAnswering(config=UpperCAmelCase__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE ( self : Any) ->str:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]:
'''simple docstring'''
A__ = TFMobileBertModelTest.TFMobileBertModelTester(self)
A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
A__ = TFMobileBertModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
'''simple docstring'''
A__ = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''')
A__ = tf.constant([[0, 1, 2, 3, 4, 5]])
A__ = model(UpperCAmelCase__)[0]
A__ = [1, 6, 30_522]
self.assertEqual(output.shape , UpperCAmelCase__)
A__ = tf.constant(
[
[
[-4.5919547, -9.248295, -9.645256],
[-6.7306175, -6.440284, -6.6052837],
[-7.2743506, -6.7847915, -6.024673],
]
])
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4)
| 87 | 1 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __SCREAMING_SNAKE_CASE( __snake_case ):
def __init__( self: Union[str, Any] , *UpperCamelCase: Dict , UpperCamelCase: Union[str, Any]=None , UpperCamelCase: List[str]=None , **UpperCamelCase: Dict ) -> List[Any]:
super().__init__(*_lowercase , **_lowercase )
snake_case__ = eval_examples
snake_case__ = post_process_function
def lowerCAmelCase_ ( self: int , UpperCamelCase: str = None , UpperCamelCase: List[str]=None , UpperCamelCase: Any = None , UpperCamelCase: Any = "eval" , **UpperCamelCase: List[Any] , ) -> int:
snake_case__ = gen_kwargs.copy()
snake_case__ = (
gen_kwargs["""max_length"""] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length
)
snake_case__ = (
gen_kwargs["""num_beams"""] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams
)
snake_case__ = gen_kwargs
snake_case__ = self.eval_dataset if eval_dataset is None else eval_dataset
snake_case__ = self.get_eval_dataloader(_lowercase )
snake_case__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
snake_case__ = self.compute_metrics
snake_case__ = None
snake_case__ = time.time()
snake_case__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case__ = eval_loop(
_lowercase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowercase , metric_key_prefix=_lowercase , )
finally:
snake_case__ = compute_metrics
snake_case__ = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_lowercase , _lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
snake_case__ = self.post_process_function(_lowercase , _lowercase , _lowercase )
snake_case__ = self.compute_metrics(_lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
snake_case__ = metrics.pop(_lowercase )
metrics.update(output.metrics )
else:
snake_case__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_lowercase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
snake_case__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowercase )
return metrics
def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple=None , UpperCamelCase: Dict = "test" , **UpperCamelCase: str ) -> Tuple:
snake_case__ = gen_kwargs.copy()
snake_case__ = self.get_test_dataloader(_lowercase )
# Temporarily disable metric computation, we will do it in the loop here.
snake_case__ = self.compute_metrics
snake_case__ = None
snake_case__ = time.time()
snake_case__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case__ = eval_loop(
_lowercase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowercase , metric_key_prefix=_lowercase , )
finally:
snake_case__ = compute_metrics
snake_case__ = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_lowercase , _lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
snake_case__ = self.post_process_function(_lowercase , _lowercase , _lowercase , 'predict' )
snake_case__ = self.compute_metrics(_lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
snake_case__ = metrics.pop(_lowercase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowercase )
| 719 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : str = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
__UpperCamelCase : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name ( class_name ):
    """simple docstring"""
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(f'''.{module_name}''' , 'transformers.models' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , '__name__' , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_image_processor_config ( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    """simple docstring"""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the image processor configuration file, will try to use the model config instead.' )
        return {}
    with open(resolved_config_file , encoding='utf-8' ) as reader:
        return json.load(reader )
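# Usage sketch for the helper above (repo id is an arbitrary example; requires network access):
# config_dict = get_image_processor_config("google/vit-base-patch16-224")
# processor_type = config_dict.get("image_processor_type")  # e.g. "ViTImageProcessor"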
class __SCREAMING_SNAKE_CASE:
def __init__( self: Optional[int] ) -> Union[str, Any]:
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(UpperCamelCase )
def lowerCAmelCase_ ( cls: int , UpperCamelCase: int , **UpperCamelCase: str ) -> Optional[Any]:
snake_case__ = kwargs.pop('config' , UpperCamelCase )
snake_case__ = kwargs.pop('trust_remote_code' , UpperCamelCase )
snake_case__ = True
snake_case__ , snake_case__ = ImageProcessingMixin.get_image_processor_dict(UpperCamelCase , **UpperCamelCase )
snake_case__ = config_dict.get('image_processor_type' , UpperCamelCase )
snake_case__ = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
snake_case__ = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
snake_case__ = config_dict.pop('feature_extractor_type' , UpperCamelCase )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
snake_case__ = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
snake_case__ = config_dict['auto_map']['AutoFeatureExtractor']
snake_case__ = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(UpperCamelCase , UpperCamelCase ):
snake_case__ = AutoConfig.from_pretrained(UpperCamelCase , **UpperCamelCase )
# It could be in `config.image_processor_type``
snake_case__ = getattr(UpperCamelCase , 'image_processor_type' , UpperCamelCase )
if hasattr(UpperCamelCase , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
snake_case__ = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
snake_case__ = image_processor_class_from_name(UpperCamelCase )
snake_case__ = image_processor_auto_map is not None
snake_case__ = image_processor_class is not None or type(UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING
snake_case__ = resolve_trust_remote_code(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
if has_remote_code and trust_remote_code:
snake_case__ = get_class_from_dynamic_module(
UpperCamelCase , UpperCamelCase , **UpperCamelCase )
snake_case__ = kwargs.pop('code_revision' , UpperCamelCase )
if os.path.isdir(UpperCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING:
snake_case__ = IMAGE_PROCESSOR_MAPPING[type(UpperCamelCase )]
return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase )
raise ValueError(
F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase: Optional[Any] , UpperCamelCase: int ) -> Optional[Any]:
IMAGE_PROCESSOR_MAPPING.register(UpperCamelCase , UpperCamelCase )
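# --- Editor's usage sketch (illustrative, not part of the original module) ---
# `image_processor_class_from_name` resolves a class from the static mapping above,
# from dynamically registered processors, or from the main `transformers` init
# (a dummy object when a dependency is missing). Assuming vision deps are installed:
image_processor_cls = image_processor_class_from_name("ViTImageProcessor")
if image_processor_cls is not None:
    print(image_processor_cls.__name__)  # ViTImageProcessor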
| 372 | 0 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def __UpperCamelCase( _A : Optional[Any]=32 , _A : List[Any]=10 , _A : Optional[Any]=1_00 , _A : List[str]=10_26 , _A : List[Any]=True , _A : Union[str, Any]="data/tokenized_stories_train_wikitext103.jbl" , _A : Union[str, Any]="igf_context_pairs.jbl" , ):
'''simple docstring'''
set_seed(3 )
# generate train_data and objective_set
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = generate_datasets(
_A , _A , number=_A , min_len=10_26 , trim=_A )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
UpperCAmelCase__ : int = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
UpperCAmelCase__ : Optional[int] = load_gpta('''gpt2''' ).to(_A )
print('''computing perplexity on objective set''' )
UpperCAmelCase__ : int = compute_perplexity(_A , _A , _A ).item()
print('''perplexity on objective set:''' , _A )
# collect igf pairs and save to file demo.jbl
collect_objective_set(_A , _A , _A , _A , _A , _A , _A , _A )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def __UpperCamelCase( _A : Dict , _A : Tuple=15 , _A : str=1_28 , _A : Tuple=1_00 , _A : List[Any]="igf_model.pt" , ):
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
UpperCAmelCase__ : Dict = GPTaLMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
UpperCAmelCase__ : str = SecondaryLearner(_A )
# Train secondary learner
UpperCAmelCase__ : Optional[int] = train_secondary_learner(
_A , _A , max_epochs=_A , batch_size=_A , eval_freq=1_00 , igf_model_path=_A , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def __UpperCamelCase( _A : List[Any] , _A : str , _A : Union[str, Any] , _A : Optional[int]=32 , _A : Any=10_00 , _A : Tuple=16 , _A : Tuple=1.0 , _A : List[str]=recopy_gpta , _A : int=None , _A : Optional[int]=10 , _A : Tuple="gpt2_finetuned.pt" , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
UpperCAmelCase__ : Union[str, Any] = RandomSampler(_A )
UpperCAmelCase__ : str = DataLoader(_A , sampler=_A )
UpperCAmelCase__ : str = max_steps // (len(_A )) + 1
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : Tuple = torch.zeros((1, context_len) , dtype=torch.long , device=_A )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = recopy_model(_A , _A , _A )
model.train()
if secondary_learner is not None:
secondary_learner.to(_A )
secondary_learner.eval()
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Tuple = 0
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : Dict = []
# Compute the performance of the transformer model at the beginning
UpperCAmelCase__ : Any = compute_perplexity(_A , _A , _A )
test_perps.append(_A )
print('''Test perplexity, step''' , _A , ''':''' , _A )
for epoch in range(int(_A ) ):
for step, example in enumerate(_A ):
torch.cuda.empty_cache()
UpperCAmelCase__ : Dict = random.randint(0 , example.size(2 ) - context_len - 1 )
UpperCAmelCase__ : Union[str, Any] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
UpperCAmelCase__ : Union[str, Any] = model(_A , labels=_A )
UpperCAmelCase__ : Dict = True
if secondary_learner is not None:
UpperCAmelCase__ : Tuple = secondary_learner.forward(
torch.tensor(_A , dtype=torch.long , device=_A ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(_A ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
UpperCAmelCase__ : Tuple = -1
if predicted_q < threshold:
UpperCAmelCase__ : Optional[int] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
UpperCAmelCase__ : Tuple = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
UpperCAmelCase__ : Optional[Any] = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
UpperCAmelCase__ : str = compute_perplexity(_A , _A , _A )
test_perps.append(_A )
print('''Test perplexity, step''' , _A , ''':''' , _A )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , _A )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def __UpperCamelCase( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=_A , type=_A , required=_A , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=_A , type=_A , required=_A , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=_A , default=_A , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=_A , default=_A , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=_A , type=_A , required=_A , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=_A , type=_A , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=_A , default=_A , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=1_00 , type=_A , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=1_00 , type=_A , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=10_00 , type=_A , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=1_28 , type=_A , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=_A , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=_A , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=1_00 , type=_A , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=10_26 , type=_A , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=_A , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=_A , type=_A , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=_A , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=_A , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=_A , type=_A , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=_A , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
UpperCAmelCase__ : Any = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
UpperCAmelCase__ : List[str] = training_secondary_learner(
_A , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
UpperCAmelCase__ : Union[str, Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
UpperCAmelCase__ , UpperCAmelCase__ : Any = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=1_00 , min_len=10_26 , trim=_A )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
_A , _A , _A , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=_A , secondary_learner=_A , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
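# --- Editor's sketch: what a perplexity helper like `compute_perplexity` plausibly
# computes (an assumption about the `igf.igf` internals, not its actual code).
# Perplexity is the exponential of the mean language-modeling loss:
import math

def perplexity_sketch(model, data_loader, device):
    model.eval()
    total_loss, n_batches = 0.0, 0
    with torch.no_grad():
        for batch in data_loader:
            batch = batch.to(device)
            outputs = model(batch, labels=batch)
            total_loss += outputs[0].item()  # outputs[0] is the LM loss when labels are given
            n_batches += 1
    return math.exp(total_loss / max(n_batches, 1))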
| 614 | '''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def __UpperCamelCase( _A : List[str] , _A : Optional[int] , _A : Dict=8 ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
UpperCAmelCase__ : Tuple = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
class _lowercase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,) -> Optional[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,movq=lowerCamelCase_ ,)
UpperCAmelCase__ : str = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Any:
'''simple docstring'''
if latents is None:
UpperCAmelCase__ : Dict = randn_tensor(lowerCamelCase_ ,generator=lowerCamelCase_ ,device=lowerCamelCase_ ,dtype=lowerCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
UpperCAmelCase__ : Optional[Any] = latents.to(lowerCamelCase_ )
UpperCAmelCase__ : Dict = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_=None ,) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : int = len(lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else 1
# get prompt text embeddings
UpperCAmelCase__ : Tuple = self.tokenizer(
lowerCamelCase_ ,padding='''max_length''' ,truncation=lowerCamelCase_ ,max_length=77 ,return_attention_mask=lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,return_tensors='''pt''' ,)
UpperCAmelCase__ : List[Any] = text_inputs.input_ids
UpperCAmelCase__ : List[str] = self.tokenizer(lowerCamelCase_ ,padding='''longest''' ,return_tensors='''pt''' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase__ : str = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCAmelCase__ : Tuple = text_input_ids.to(lowerCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = text_inputs.attention_mask.to(lowerCamelCase_ )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.text_encoder(
input_ids=lowerCamelCase_ ,attention_mask=lowerCamelCase_ )
UpperCAmelCase__ : Tuple = prompt_embeds.repeat_interleave(lowerCamelCase_ ,dim=0 )
UpperCAmelCase__ : List[str] = text_encoder_hidden_states.repeat_interleave(lowerCamelCase_ ,dim=0 )
UpperCAmelCase__ : str = text_mask.repeat_interleave(lowerCamelCase_ ,dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase__ : List[str]
if negative_prompt is None:
UpperCAmelCase__ : List[Any] = [''''''] * batch_size
elif type(lowerCamelCase_ ) is not type(lowerCamelCase_ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase_ )} !='''
f''' {type(lowerCamelCase_ )}.''' )
elif isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase__ : Dict = [negative_prompt]
elif batch_size != len(lowerCamelCase_ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase_ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
''' the batch size of `prompt`.''' )
else:
UpperCAmelCase__ : Dict = negative_prompt
UpperCAmelCase__ : Union[str, Any] = self.tokenizer(
lowerCamelCase_ ,padding='''max_length''' ,max_length=77 ,truncation=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,return_tensors='''pt''' ,)
UpperCAmelCase__ : Optional[Any] = uncond_input.input_ids.to(lowerCamelCase_ )
UpperCAmelCase__ : List[str] = uncond_input.attention_mask.to(lowerCamelCase_ )
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.text_encoder(
input_ids=lowerCamelCase_ ,attention_mask=lowerCamelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase__ : Optional[int] = negative_prompt_embeds.shape[1]
UpperCAmelCase__ : List[Any] = negative_prompt_embeds.repeat(1 ,lowerCamelCase_ )
UpperCAmelCase__ : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,lowerCamelCase_ )
UpperCAmelCase__ : int = uncond_text_encoder_hidden_states.shape[1]
UpperCAmelCase__ : int = uncond_text_encoder_hidden_states.repeat(1 ,lowerCamelCase_ ,1 )
UpperCAmelCase__ : List[str] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt ,lowerCamelCase_ ,-1 )
UpperCAmelCase__ : Union[str, Any] = uncond_text_mask.repeat_interleave(lowerCamelCase_ ,dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase__ : str = torch.cat([negative_prompt_embeds, prompt_embeds] )
UpperCAmelCase__ : str = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
UpperCAmelCase__ : List[str] = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCAmelCase__ ( self ,lowerCamelCase_=0 ) -> List[str]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
UpperCAmelCase__ : Dict = torch.device(f'''cuda:{gpu_id}''' )
UpperCAmelCase__ : List[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase_ ,lowerCamelCase_ )
def lowerCAmelCase__ ( self ,lowerCamelCase_=0 ) -> Union[str, Any]:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('''>=''' ,'''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
UpperCAmelCase__ : str = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('''cpu''' ,silence_dtype_warnings=lowerCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase__ : Optional[Any] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = cpu_offload_with_hook(lowerCamelCase_ ,lowerCamelCase_ ,prev_module_hook=lowerCamelCase_ )
if self.safety_checker is not None:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = cpu_offload_with_hook(self.safety_checker ,lowerCamelCase_ ,prev_module_hook=lowerCamelCase_ )
# We'll offload the last model manually.
UpperCAmelCase__ : str = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
if not hasattr(self.unet ,'''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase_ ,'''_hf_hook''' )
and hasattr(module._hf_hook ,'''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase_ )
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = 512 ,lowerCamelCase_ = 512 ,lowerCamelCase_ = 100 ,lowerCamelCase_ = 4.0 ,lowerCamelCase_ = 1 ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = "pil" ,lowerCamelCase_ = True ,) -> Optional[Any]:
'''simple docstring'''
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase__ : List[Any] = 1
elif isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = len(lowerCamelCase_ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase_ )}''' )
UpperCAmelCase__ : Union[str, Any] = self._execution_device
UpperCAmelCase__ : Optional[int] = batch_size * num_images_per_prompt
UpperCAmelCase__ : Dict = guidance_scale > 1.0
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self._encode_prompt(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase__ : List[str] = torch.cat(lowerCamelCase_ ,dim=0 )
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase__ : int = torch.cat(lowerCamelCase_ ,dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase__ : Optional[Any] = image_embeds.repeat_interleave(lowerCamelCase_ ,dim=0 )
UpperCAmelCase__ : Optional[Any] = negative_image_embeds.repeat_interleave(lowerCamelCase_ ,dim=0 )
UpperCAmelCase__ : Any = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(
dtype=prompt_embeds.dtype ,device=lowerCamelCase_ )
self.scheduler.set_timesteps(lowerCamelCase_ ,device=lowerCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = self.scheduler.timesteps
UpperCAmelCase__ : List[Any] = self.unet.config.in_channels
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = get_new_h_w(lowerCamelCase_ ,lowerCamelCase_ ,self.movq_scale_factor )
# create initial latent
UpperCAmelCase__ : Optional[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) ,text_encoder_hidden_states.dtype ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,self.scheduler ,)
for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__ : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__ : Tuple = {'''text_embeds''': prompt_embeds, '''image_embeds''': image_embeds}
UpperCAmelCase__ : int = self.unet(
sample=lowerCamelCase_ ,timestep=lowerCamelCase_ ,encoder_hidden_states=lowerCamelCase_ ,added_cond_kwargs=lowerCamelCase_ ,return_dict=lowerCamelCase_ ,)[0]
if do_classifier_free_guidance:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = noise_pred.split(latents.shape[1] ,dim=1 )
UpperCAmelCase__ , UpperCAmelCase__ : Any = noise_pred.chunk(2 )
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = variance_pred.chunk(2 )
UpperCAmelCase__ : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase__ : List[str] = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,'''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__ : Tuple = self.scheduler.step(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,generator=lowerCamelCase_ ,).prev_sample
# post-processing
UpperCAmelCase__ : Optional[int] = self.movq.decode(lowerCamelCase_ ,force_not_quantize=lowerCamelCase_ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
UpperCAmelCase__ : Optional[Any] = image * 0.5 + 0.5
UpperCAmelCase__ : Any = image.clamp(0 ,1 )
UpperCAmelCase__ : str = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase__ : List[Any] = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase_ )
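# --- Editor's sketch: the classifier-free guidance step from the denoising loop
# above, isolated for clarity (same formula as in `__call__`):
def apply_classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # The unconditional and text-conditioned inputs run as one batch, so the
    # prediction splits into two halves along the batch dimension.
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)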
| 614 | 1 |
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in [a, b] by repeated interval halving."""
    start: float = a
    end: float = b
    if function(a) == 0:  # a is already a root of the function
        return a
    elif function(b) == 0:  # b is already a root of the function
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if f(a) and f(b) have the same sign, there is no guaranteed sign change
        # in [a, b], so this algorithm cannot find a root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is smaller than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
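# --- Editor's note: each bisection step halves the bracketing interval, so the
# number of iterations is logarithmic in (interval width / tolerance):
import math

steps = math.ceil(math.log2((1000 - 1) / 10**-7))
print(steps)  # 34 -- an upper bound on the halvings needed for the call above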
| 716 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # Note: `worflow_run_id` (sic) is the keyword actually defined in `get_ci_error_statistics.py`.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
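# --- Editor's usage sketch (the artifact name and token below are illustrative
# placeholders, not values from the original script):
reports = get_last_daily_ci_reports(
    artifact_names=["ci_results"],
    output_dir="./ci_results",
    token="<GITHUB_TOKEN>",  # a GitHub personal access token
)
for artifact, files in reports.items():
    print(artifact, sorted(files))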
| 319 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
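# --- Editor's sketch: exercising the parser standalone, without launching anything
# (the config path is illustrative):
parser = test_command_parser()
args = parser.parse_args(["--config_file", "my_config.yaml"])
print(args.config_file)  # my_config.yaml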
| 186 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
'''simple docstring'''
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
__A= self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase_ , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase_ , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase_ , 'num_attention_heads' ) )
class MobileViTModelTester:
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str=13 , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : str=640 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Dict="silu" , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : str=0.02 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Any=10 , lowerCAmelCase_ : Dict=None , ) -> Optional[Any]:
__A= parent
__A= batch_size
__A= image_size
__A= patch_size
__A= num_channels
__A= last_hidden_size
__A= num_attention_heads
__A= hidden_act
__A= conv_kernel_size
__A= output_stride
__A= hidden_dropout_prob
__A= attention_probs_dropout_prob
__A= classifier_dropout_prob
__A= use_labels
__A= is_training
__A= num_labels
__A= initializer_range
__A= scope
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
__A= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A= None
__A= None
if self.use_labels:
__A= ids_tensor([self.batch_size] , self.num_labels )
__A= ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__A= self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : List[Any] ) -> List[Any]:
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] ) -> Dict:
__A= MobileViTModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any ) -> Optional[Any]:
__A= self.num_labels
__A= MobileViTForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__A= model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int ) -> int:
__A= self.num_labels
__A= MobileViTForSemanticSegmentation(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__A= model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
__A= self.prepare_config_and_inputs()
__A, __A, __A, __A= config_and_inputs
__A= {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
A : Union[str, Any] = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
A : Any = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A : int = False
A : Dict = False
A : Dict = False
A : str = False
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
__A= MobileViTModelTester(self )
__A= MobileViTConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def lowerCAmelCase ( self : Any ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowerCAmelCase ( self : int ) -> Any:
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
pass
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
__A, __A= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A= model_class(lowerCAmelCase_ )
__A= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A= [*signature.parameters.keys()]
__A= ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase ( self : str ) -> int:
pass
def lowerCAmelCase ( self : str ) -> Any:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) -> str:
def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] ):
__A= model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
__A= model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
__A= outputs.hidden_states
__A= 5
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__A= 2
for i in range(len(lowerCAmelCase_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__A, __A= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A= True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A= True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A= MobileViTModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def UpperCAmelCase__( ):
"""simple docstring"""
__A= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : int ) -> str:
__A= MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(lowerCAmelCase_ )
__A= self.default_image_processor
__A= prepare_img()
__A= image_processor(images=lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
__A= model(**lowerCAmelCase_ )
# verify the logits
__A= torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
__A= torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
__A= MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__A= model.to(lowerCAmelCase_ )
__A= MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__A= prepare_img()
__A= image_processor(images=lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
__A= model(**lowerCAmelCase_ )
__A= outputs.logits
# verify the logits
__A= torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , lowerCAmelCase_ )
__A= torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
] , device=lowerCAmelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> str:
__A= MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__A= model.to(lowerCAmelCase_ )
__A= MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__A= prepare_img()
__A= image_processor(images=lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
__A= model(**lowerCAmelCase_ )
__A= outputs.logits.detach().cpu()
__A= image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase_ , target_sizes=[(50, 60)] )
__A= torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase_ )
__A= image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase_ )
__A= torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase_ )
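# --- Editor's note: expected feature-map sizes for the hidden-states test above,
# with the tester defaults image_size=32 and output_stride=32:
image_size, divisor, sizes = 32, 2, []
for _ in range(5):  # five hidden states
    sizes.append(image_size // divisor)
    divisor *= 2
print(sizes)         # [16, 8, 4, 2, 1]
print(divisor // 2)  # 32 == output_stride, matching the final assertion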
| 186 | 1 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    def test_base_case(self) -> None:
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self) -> None:
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self) -> None:
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
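# --- Editor's sketch: a minimal 0/1 knapsack that reproduces the expected values
# above (the actual `knapsack.knapsack` module may be implemented differently):
def knapsack_dp(cap, w, val):
    # 1-D DP table; iterate capacity downwards so each item is used at most once.
    dp = [0] * (cap + 1)
    for weight, value in zip(w, val):
        for c in range(cap, weight - 1, -1):
            dp[c] = max(dp[c], dp[c - weight] + value)
    return dp[cap]

print(knapsack_dp(50, [10, 20, 30], [60, 100, 120]))  # 220, as in test_knapsack
print(knapsack_dp(3, [3, 2, 1], [1, 2, 3]))           # 5, as in test_easy_case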
| 705 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
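# --- Editor's sketch of the lazy-import idea behind `_LazyModule` (simplified
# relative to the real implementation in `transformers.utils`): attribute access
# triggers the actual submodule import, keeping `import transformers` cheap.
import importlib
from types import ModuleType

class _LazyModuleSketch(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name back to the submodule that defines it.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        module = importlib.import_module(f".{self._class_to_module[attr]}", self.__name__)
        return getattr(module, attr)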
| 556 | 0 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Check whether `fs` points somewhere other than the local filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
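# --- Editor's check of the helpers above (illustrative values; the `False` below
# assumes an fsspec version where LocalFileSystem.protocol == "file"):
print(extract_path_from_uri("s3://my-bucket/datasets/train"))  # my-bucket/datasets/train
print(is_remote_filesystem(fsspec.filesystem("file")))         # False -> rename() uses shutil.move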
| 105 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 105 | 1 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
_A = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    """Download an original SAM checkpoint, convert it, and run IoU sanity checks on CUDA.

    `pytorch_dump_folder` and `push_to_hub` are accepted for CLI parity but unused
    in this excerpt.
    """
    checkpoint_path = hf_hub_download(model_hub_id, f"""checkpoints/{model_name}.pth""")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Name of the original SAM checkpoint to convert.",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        type=str,
        help="Hub repository that hosts the original checkpoints.",
    )

    args = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
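
# Usage sketch (the script's file name is an assumption here, not given in the
# source; a CUDA device is required because the sanity checks run on 'cuda'):
#
#   python convert_sam_checkpoint.py --model_name sam_vit_b_01ec64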
| 133 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
_A = 'us-east-1' # defaults region
@dataclass
class _lowercase :
lowercase_ = 42
lowercase_ = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
lowercase_ = {
'task_name': 'mnli',
'per_device_train_batch_size': 1_6,
'per_device_eval_batch_size': 1_6,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_0_0,
'save_steps': 5_5_0_0,
}
lowercase_ = {**hyperparameters, 'max_steps': 1_0_0_0}
@property
def _UpperCamelCase ( self ) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def _UpperCamelCase ( self ) -> str:
return F"""{self.framework}-transfromers-test"""
@property
def _UpperCamelCase ( self ) -> str:
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def _UpperCamelCase ( self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def UpperCAmelCase ( a_ ):
'''simple docstring'''
lowerCamelCase : Optional[int] = SageMakerTestEnvironment(framework=request.cls.framework )
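
# Sketch of a consuming test class (illustrative; only `sm_env`, `framework`,
# and the `env` attribute come from this module's fixture):
#
#   import unittest
#
#   @pytest.mark.usefixtures("sm_env")
#   class ExampleSingleNodeTest(unittest.TestCase):
#       framework = "pytorch"
#
#       def test_env(self):
#           assert self.env.base_job_name == "pytorch-transformers-test"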
| 133 | 1 |
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, str) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass
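
# Minimal sketch of a concrete subclass (illustrative; `InMemoryTextReader` is
# not part of this module, and `Dataset.from_dict` is used here only as a
# stand-in for a real loading backend):
#
#   class InMemoryTextReader(AbstractDatasetInputStream):
#       def __init__(self, texts, **kwargs):
#           super().__init__(**kwargs)
#           self.texts = texts
#
#       def read(self):
#           return Dataset.from_dict({"text": self.texts})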
| 374 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
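
# Illustrative note (not part of this file): with the _LazyModule installed
# above, a statement such as
#
#   from transformers.models.bigbird_pegasus import BigBirdPegasusConfig
#
# imports `configuration_bigbird_pegasus` only at that point; the heavy
# modeling module is likewise loaded lazily, and only when torch is available.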
| 374 | 1 |
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
_enforce_args(__UpperCamelCase , __UpperCamelCase )
if n == 0:
return 0
__A = float('''-inf''' )
for i in range(1 , n + 1 ):
__A = max(
__UpperCamelCase , prices[i - 1] + naive_cut_rod_recursive(n - i , __UpperCamelCase ) )
return max_revue
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
_enforce_args(__UpperCamelCase , __UpperCamelCase )
__A = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
__A = float('''-inf''' )
for i in range(1 , n + 1 ):
__A = max(
__UpperCamelCase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __UpperCamelCase , __UpperCamelCase ) , )
__A = max_revenue
return max_rev[n]
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
_enforce_args(__UpperCamelCase , __UpperCamelCase )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
__A = [float('''-inf''' ) for _ in range(n + 1 )]
__A = 0
for i in range(1 , n + 1 ):
__A = max_rev[i]
for j in range(1 , i + 1 ):
__A = max(__UpperCamelCase , prices[j - 1] + max_rev[i - j] )
__A = max_revenue_i
return max_rev[n]
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
if n < 0:
__A = f'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(__UpperCamelCase )
if n > len(__UpperCamelCase ):
__A = (
'''Each integral piece of rod must have a corresponding price. '''
f'Got n = {n} but length of prices = {len(__UpperCamelCase )}'
)
raise ValueError(__UpperCamelCase )
def lowerCAmelCase ( ):
"""simple docstring"""
__A = [6, 1_0, 1_2, 1_5, 2_0, 2_3]
__A = len(__UpperCamelCase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
__A = 3_6
__A = top_down_cut_rod(__UpperCamelCase , __UpperCamelCase )
__A = bottom_up_cut_rod(__UpperCamelCase , __UpperCamelCase )
__A = naive_cut_rod_recursive(__UpperCamelCase , __UpperCamelCase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
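
# Illustrative check (not part of the original module), using the classic CLRS
# price table: for a rod of length 4 the optimum is 10, obtained from two
# pieces of length 2 (5 + 5).
#
#   clrs_prices = [1, 5, 8, 9, 10, 17, 17, 20]
#   bottom_up_cut_rod(4, clrs_prices)  # -> 10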
| 215 |
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowercase_ = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    """Save an FSDP-wrapped model according to the plugin's configured state dict type."""
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    """Load an FSDP-wrapped model saved by `save_fsdp_model`."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    """Save the optimizer state for an FSDP-wrapped model."""
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    """Load the optimizer state saved by `save_fsdp_optimizer`."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # The check below should work, but currently it isn't working (mostly a PyTorch issue);
            # in the meantime it is disabled at the cost of excess memory usage.
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
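
# Usage sketch (illustrative): in practice these helpers are invoked for you by
# `Accelerator.save_state` / `Accelerator.load_state` when FSDP is enabled, but
# a direct call looks like:
#
#   plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(plugin, accelerator, model, "ckpt")
#   save_fsdp_optimizer(plugin, accelerator, optimizer, model, "ckpt")
#   ...
#   load_fsdp_model(plugin, accelerator, model, "ckpt")
#   load_fsdp_optimizer(plugin, accelerator, optimizer, model, "ckpt")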
| 215 | 1 |