"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : bool = False ):
'''simple docstring'''
if radian_mode:
return [magnitude * cos(SCREAMING_SNAKE_CASE ), magnitude * sin(SCREAMING_SNAKE_CASE )]
return [magnitude * cos(radians(SCREAMING_SNAKE_CASE ) ), magnitude * sin(radians(SCREAMING_SNAKE_CASE ) )]
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : NDArray[floataa] , SCREAMING_SNAKE_CASE : NDArray[floataa] , SCREAMING_SNAKE_CASE : float = 10**-1 ):
'''simple docstring'''
lowerCAmelCase = cross(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase = sum(SCREAMING_SNAKE_CASE )
return abs(SCREAMING_SNAKE_CASE ) < eps
if __name__ == "__main__":
# Test to check if it works
SCREAMING_SNAKE_CASE__ = array(
[
polar_force(7_18.4, 180 - 30),
polar_force(8_79.54, 45),
polar_force(100, -90),
]
)
SCREAMING_SNAKE_CASE__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
SCREAMING_SNAKE_CASE__ = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
SCREAMING_SNAKE_CASE__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
SCREAMING_SNAKE_CASE__ = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
SCREAMING_SNAKE_CASE__ = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """
    Find all the valid positions a knight can move to from the current position.
    """
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """
    Check if the board (matrix) has been completely filled with non-zero values.
    """
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """
    Helper function to solve the knight tour problem by backtracking.
    """
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """
    Find a solution for the knight tour problem on a board of size n.
    """
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
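
# A short usage sketch added for illustration (not part of the original module).
# n=5 is the smallest board size with an open knight's tour; smaller boards
# raise ValueError. Each cell of the returned board holds the step number
# (1..n*n) at which the knight visits it.
if __name__ == "__main__":
    for row in open_knight_tour(5):
        print(row)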
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class LDMPipeline(DiffusionPipeline):
    """
    Pipeline for unconditional image generation with latent diffusion: a DDIM
    loop in the latent space of a VQ-VAE, decoded to pixels at the end.
    """

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
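
# A minimal usage sketch (illustrative; this module uses relative imports, so
# in practice the pipeline is imported from the installed package). It assumes
# a compatible pretrained checkpoint; "CompVis/ldm-celebahq-256" is one
# commonly used with this unconditional latent-diffusion pipeline:
#
#     import torch
#     from diffusers import LDMPipeline
#
#     pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#     generator = torch.Generator().manual_seed(0)
#     image = pipe(batch_size=1, num_inference_steps=50, generator=generator).images[0]
#     image.save("ldm_sample.png")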
"""simple docstring"""
import os
import sys
_a : List[Any] = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
_a : Optional[Any] = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def SCREAMING_SNAKE_CASE ( *_lowerCamelCase : List[str] ,**_lowerCamelCase : Optional[Any] ) -> Any:
return AutoConfig.from_pretrained(*_lowerCamelCase ,**_lowerCamelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def SCREAMING_SNAKE_CASE ( *_lowerCamelCase : List[str] ,**_lowerCamelCase : int ) -> List[str]:
return AutoTokenizer.from_pretrained(*_lowerCamelCase ,**_lowerCamelCase )
@add_start_docstrings(AutoModel.__doc__ )
def SCREAMING_SNAKE_CASE ( *_lowerCamelCase : Any ,**_lowerCamelCase : str ) -> Any:
return AutoModel.from_pretrained(*_lowerCamelCase ,**_lowerCamelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def SCREAMING_SNAKE_CASE ( *_lowerCamelCase : List[str] ,**_lowerCamelCase : List[Any] ) -> Dict:
return AutoModelForCausalLM.from_pretrained(*_lowerCamelCase ,**_lowerCamelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def SCREAMING_SNAKE_CASE ( *_lowerCamelCase : List[Any] ,**_lowerCamelCase : Optional[int] ) -> Union[str, Any]:
return AutoModelForMaskedLM.from_pretrained(*_lowerCamelCase ,**_lowerCamelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def SCREAMING_SNAKE_CASE ( *_lowerCamelCase : List[Any] ,**_lowerCamelCase : Dict ) -> Dict:
return AutoModelForSequenceClassification.from_pretrained(*_lowerCamelCase ,**_lowerCamelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def SCREAMING_SNAKE_CASE ( *_lowerCamelCase : Union[str, Any] ,**_lowerCamelCase : List[Any] ) -> Optional[Any]:
return AutoModelForQuestionAnswering.from_pretrained(*_lowerCamelCase ,**_lowerCamelCase )
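
# Usage sketch: this module follows the torch.hub `hubconf.py` convention
# (`dependencies` list plus flat entry-point functions), so the entry points
# above are meant to be called through torch.hub rather than imported directly:
#
#     import torch
#
#     tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#     model = torch.hub.load(
#         "huggingface/transformers", "modelForSequenceClassification", "bert-base-uncased"
#     )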
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Dict:
_lowerCAmelCase : List[Any] = torch.exp(_lowerCamelCase )
_lowerCAmelCase : List[Any] = torch.sum(_lowerCamelCase ,dim=1 ) # sum of exp(x_i)
_lowerCAmelCase : Dict = torch.sum(x * exp_x ,dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowerCamelCase ) - B / A
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : int = config.output_attentions
_lowerCAmelCase : Any = config.output_hidden_states
_lowerCAmelCase : List[Any] = nn.ModuleList([BertLayer(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : Any = nn.ModuleList([BertHighway(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : str = [-1 for _ in range(config.num_hidden_layers )]
def __A ( self , a__ ):
if (type(a__ ) is float) or (type(a__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCAmelCase : Tuple = x
else:
_lowerCAmelCase : Optional[int] = x
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __A ( self , a__ , a__=None , a__=None , a__=None , a__=None , ):
_lowerCAmelCase : Any = ()
_lowerCAmelCase : Optional[int] = ()
_lowerCAmelCase : List[Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase : str = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[str] = layer_module(
a__ , a__ , head_mask[i] , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase : Dict = all_attentions + (layer_outputs[1],)
_lowerCAmelCase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : Union[str, Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Optional[int] = current_outputs + (all_attentions,)
_lowerCAmelCase : Optional[Any] = self.highway[i](a__ )
# logits, pooled_output
if not self.training:
_lowerCAmelCase : Tuple = highway_exit[0]
_lowerCAmelCase : Any = entropy(a__ )
_lowerCAmelCase : Optional[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase : List[str] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(a__ , i + 1 )
else:
_lowerCAmelCase : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase : List[Any] = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[Any] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : List[str] = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Any = outputs + (all_attentions,)
_lowerCAmelCase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : Any = config
_lowerCAmelCase : Tuple = BertEmbeddings(a__ )
_lowerCAmelCase : Tuple = DeeBertEncoder(a__ )
_lowerCAmelCase : List[str] = BertPooler(a__ )
self.init_weights()
def __A ( self ):
self.encoder.init_highway_pooler(self.pooler )
def __A ( self ):
return self.embeddings.word_embeddings
def __A ( self , a__ ):
_lowerCAmelCase : Dict = value
def __A ( self , a__ ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(a__ )
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_lowerCAmelCase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_lowerCAmelCase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase : List[Any] = torch.ones(a__ , device=a__ )
if encoder_attention_mask is None:
_lowerCAmelCase : Optional[Any] = torch.ones(a__ , device=a__ )
if token_type_ids is None:
_lowerCAmelCase : Dict = torch.zeros(a__ , dtype=torch.long , device=a__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(a__ , a__ , a__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase : Tuple = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase : Union[str, Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase : Optional[Any] = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase : Optional[int] = self.get_head_mask(a__ , self.config.num_hidden_layers )
_lowerCAmelCase : Dict = self.embeddings(
input_ids=a__ , position_ids=a__ , token_type_ids=a__ , inputs_embeds=a__ )
_lowerCAmelCase : Union[str, Any] = self.encoder(
a__ , attention_mask=a__ , head_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
_lowerCAmelCase : Dict = encoder_outputs[0]
_lowerCAmelCase : Union[str, Any] = self.pooler(a__ )
_lowerCAmelCase : Dict = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ ):
_lowerCAmelCase : str = message
_lowerCAmelCase : str = exit_layer # start from 1!
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Any = BertPooler(a__ )
_lowerCAmelCase : str = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def __A ( self , a__ ):
# Pooler
_lowerCAmelCase : Tuple = encoder_outputs[0]
_lowerCAmelCase : int = self.pooler(a__ )
# "return" pooler_output
# BertModel
_lowerCAmelCase : Union[str, Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCAmelCase : Optional[int] = bmodel_output[1]
_lowerCAmelCase : Tuple = self.dropout(a__ )
_lowerCAmelCase : Dict = self.classifier(a__ )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : List[str] = config.num_labels
_lowerCAmelCase : Optional[Any] = config.num_hidden_layers
_lowerCAmelCase : str = DeeBertModel(a__ )
_lowerCAmelCase : Tuple = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=-1 , a__=False , ):
_lowerCAmelCase : Dict = self.num_layers
try:
_lowerCAmelCase : str = self.bert(
a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase : Any = outputs[1]
_lowerCAmelCase : Optional[int] = self.dropout(a__ )
_lowerCAmelCase : List[str] = self.classifier(a__ )
_lowerCAmelCase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase : Tuple = e.message
_lowerCAmelCase : int = e.exit_layer
_lowerCAmelCase : Union[str, Any] = outputs[0]
if not self.training:
_lowerCAmelCase : Tuple = entropy(a__ )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : Tuple = MSELoss()
_lowerCAmelCase : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Any = CrossEntropyLoss()
_lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowerCAmelCase : Dict = highway_exit[0]
if not self.training:
highway_logits_all.append(a__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : List[Any] = MSELoss()
_lowerCAmelCase : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Optional[int] = CrossEntropyLoss()
_lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(a__ )
if train_highway:
_lowerCAmelCase : List[Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase : Any = (loss,) + outputs
if not self.training:
_lowerCAmelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
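
# A hedged usage sketch of the early-exit mechanism (illustrative only; the
# model below is untrained, so predictions are meaningless). Each layer's
# highway head computes the entropy of its logits, and at inference time a
# layer whose entropy falls below its threshold raises HighwayException, which
# the classifier catches to return that layer's logits instead of running the
# full stack.
if __name__ == "__main__":
    from transformers import BertConfig, BertTokenizer

    config = BertConfig(num_labels=2)
    model = DeeBertForSequenceClassification(config)
    model.eval()
    # One entropy threshold per layer; higher values exit earlier.
    model.bert.encoder.set_early_exit_entropy(0.5)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    inputs = tokenizer("an example sentence", return_tensors="pt")
    outputs = model(**inputs)  # logits first; entropies and exit layer appended in eval mode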
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
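
# A small illustrative sketch (the module above uses relative imports, so from
# user code the config would come from the installed package): the config
# derives num_hidden_layers from the per-stage block repeats, so changing
# `num_block_repeats` scales the reported depth.
#
#     from transformers import EfficientNetConfig
#
#     config = EfficientNetConfig()
#     print(config.model_type)         # "efficientnet"
#     print(config.num_hidden_layers)  # sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64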
from __future__ import annotations

import unittest

from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow


if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )


@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__a = "hf-internal-testing/tiny-random-bert"
__a = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__a = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self: Optional[int] ) -> Tuple:
snake_case_ :Tuple = cached_file(snake_case , snake_case )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(snake_case ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(snake_case , snake_case ) ) )
with open(os.path.join(snake_case , """refs""" , """main""" ) ) as f:
snake_case_ :List[str] = f.read()
self.assertEqual(snake_case , os.path.join(snake_case , """snapshots""" , snake_case , snake_case ) )
self.assertTrue(os.path.isfile(snake_case ) )
# File is cached at the same place the second time.
snake_case_ :Tuple = cached_file(snake_case , snake_case )
self.assertEqual(snake_case , snake_case )
# Using a specific revision to test the full commit hash.
snake_case_ :List[str] = cached_file(snake_case , snake_case , revision="""9b8c223""" )
self.assertEqual(snake_case , os.path.join(snake_case , """snapshots""" , snake_case , snake_case ) )
def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
with self.assertRaisesRegex(snake_case , """is not a valid model identifier""" ):
snake_case_ :int = cached_file("""tiny-random-bert""" , snake_case )
with self.assertRaisesRegex(snake_case , """is not a valid git identifier""" ):
snake_case_ :str = cached_file(snake_case , snake_case , revision="""aaaa""" )
with self.assertRaisesRegex(snake_case , """does not appear to have a file named""" ):
snake_case_ :Tuple = cached_file(snake_case , """conf""" )
def lowerCAmelCase_ ( self: int ) -> List[str]:
with self.assertRaisesRegex(snake_case , """does not appear to have a file named""" ):
snake_case_ :Any = cached_file(snake_case , """conf""" )
with open(os.path.join(snake_case , """refs""" , """main""" ) ) as f:
snake_case_ :Optional[Any] = f.read()
self.assertTrue(os.path.isfile(os.path.join(snake_case , """.no_exist""" , snake_case , """conf""" ) ) )
snake_case_ :List[str] = cached_file(snake_case , """conf""" , _raise_exceptions_for_missing_entries=snake_case )
self.assertIsNone(snake_case )
snake_case_ :int = cached_file(snake_case , """conf""" , local_files_only=snake_case , _raise_exceptions_for_missing_entries=snake_case )
self.assertIsNone(snake_case )
snake_case_ :Optional[int] = mock.Mock()
snake_case_ :List[Any] = 500
snake_case_ :List[str] = {}
snake_case_ :Dict = HTTPError
snake_case_ :Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=snake_case ) as mock_head:
snake_case_ :Tuple = cached_file(snake_case , """conf""" , _raise_exceptions_for_connection_errors=snake_case )
self.assertIsNone(snake_case )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase_ ( self: str ) -> Tuple:
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , snake_case ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , snake_case ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , snake_case ) )
def lowerCAmelCase_ ( self: List[Any] ) -> List[str]:
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(snake_case , """is not a valid model identifier""" ):
get_file_from_repo("""bert-base-case""" , snake_case )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(snake_case , """is not a valid git identifier""" ):
get_file_from_repo("""bert-base-cased""" , snake_case , revision="""ahaha""" )
snake_case_ :Optional[Any] = get_file_from_repo("""bert-base-cased""" , snake_case )
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case_ :int = json.loads(open(snake_case , """r""" ).read() )
self.assertEqual(config["""hidden_size"""] , 768 )
def lowerCAmelCase_ ( self: List[Any] ) -> str:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ :Union[str, Any] = Path(snake_case ) / """a.txt"""
filename.touch()
self.assertEqual(get_file_from_repo(snake_case , """a.txt""" ) , str(snake_case ) )
self.assertIsNone(get_file_from_repo(snake_case , """b.txt""" ) )
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> int:
if attention_mask is None:
__lowercase : int = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __lowerCAmelCase :
"""simple docstring"""
A__ : Union[str, Any] = OPTConfig
A__ : Optional[int] = {}
A__ : Optional[int] = '''gelu'''
def __init__( self : Tuple , _snake_case : Dict , _snake_case : List[str]=13 , _snake_case : Optional[Any]=7 , _snake_case : List[str]=True , _snake_case : Union[str, Any]=False , _snake_case : Union[str, Any]=99 , _snake_case : Dict=16 , _snake_case : Any=2 , _snake_case : Dict=4 , _snake_case : List[Any]=4 , _snake_case : Optional[int]="gelu" , _snake_case : List[str]=0.1 , _snake_case : List[str]=0.1 , _snake_case : List[Any]=20 , _snake_case : Any=2 , _snake_case : List[str]=1 , _snake_case : Tuple=0 , _snake_case : Dict=16 , _snake_case : Tuple=16 , ):
__lowercase : Dict = parent
__lowercase : str = batch_size
__lowercase : List[str] = seq_length
__lowercase : Optional[int] = is_training
__lowercase : Optional[int] = use_labels
__lowercase : Optional[int] = vocab_size
__lowercase : Optional[Any] = hidden_size
__lowercase : Dict = num_hidden_layers
__lowercase : List[Any] = num_attention_heads
__lowercase : Any = intermediate_size
__lowercase : Dict = hidden_act
__lowercase : Any = hidden_dropout_prob
__lowercase : Tuple = attention_probs_dropout_prob
__lowercase : Union[str, Any] = max_position_embeddings
__lowercase : Any = eos_token_id
__lowercase : List[Any] = pad_token_id
__lowercase : Optional[int] = bos_token_id
__lowercase : List[str] = embed_dim
__lowercase : Any = word_embed_proj_dim
__lowercase : Optional[int] = False
def snake_case_ ( self : Optional[Any] ):
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowercase : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowercase : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowercase : Any = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_snake_case , **self.config_updates , )
__lowercase : Optional[int] = prepare_opt_inputs_dict(_snake_case , _snake_case )
return config, inputs_dict
def snake_case_ ( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] ):
__lowercase : int = TFOPTModel(config=_snake_case )
__lowercase : Union[str, Any] = inputs_dict['''input_ids''']
__lowercase : Tuple = input_ids[:1, :]
__lowercase : Optional[Any] = inputs_dict['''attention_mask'''][:1, :]
__lowercase : Dict = 1
# first forward pass
__lowercase : Dict = model(_snake_case , attention_mask=_snake_case , use_cache=_snake_case )
__lowercase , __lowercase : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowercase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase : List[str] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__lowercase : str = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowercase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowercase : Any = model(_snake_case , attention_mask=_snake_case )[0]
__lowercase : List[Any] = model(_snake_case , attention_mask=_snake_case , past_key_values=_snake_case )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowercase : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowercase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
__lowercase : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_snake_case , _snake_case , rtol=1E-3 )
@require_tf
class __lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : List[Any] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
A__ : int = (TFOPTForCausalLM,) if is_tf_available() else ()
A__ : List[str] = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
A__ : Union[str, Any] = False
A__ : Optional[int] = False
A__ : int = False
A__ : List[str] = 1_0
def snake_case_ ( self : Any ):
__lowercase : Optional[Any] = TFOPTModelTester(self )
__lowercase : List[Any] = ConfigTester(self , config_class=_snake_case )
def snake_case_ ( self : Tuple ):
self.config_tester.run_common_tests()
def snake_case_ ( self : int ):
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_snake_case )
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 156
| 0
|
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Return base**exponent % modulo_value using recursive fast exponentiation."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation base↑↑height."""
    # the tower is evaluated from the top down: base**(base**(...)) mod 10**digits
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f"""{solution() = }""")
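
# Worked example (a hedged sketch, not part of the original file): for the tiny
# tower 3**3**3 = 3**27 = 7625597484987, keeping the last 8 digits gives
#
#     >>> solution(base=3, height=3, digits=8)
#     97484987
#
# which matches 7625597484987 % 10**8 == 97484987.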
| 206
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # binarize the logits into a 0/1 mask
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
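
# Minimal usage sketch (an illustration, not part of the original file): PipelineTool's
# __call__ chains encode -> forward -> decode, so the tool can be invoked directly.
# "cat.png" and the "cat" label below are placeholder inputs.
#
#     tool = ImageSegmentationTool()
#     mask = tool(Image.open("cat.png"), "cat")
#     mask.save("cat_mask.png")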
| 206
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
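
# Usage sketch (not in the original file): the attribute_map above lets generic code
# read `hidden_size`/`num_attention_heads` although the config stores them under
# `d_model`/`encoder_attention_heads`.
#
#     config = TableTransformerConfig()
#     assert config.hidden_size == config.d_model == 256
#     assert config.num_attention_heads == config.encoder_attention_heads == 8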
| 117
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
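
# Illustration (a sketch, not part of the original file): because the module object is
# swapped for a _LazyModule, the torch-backed symbols are only imported on first
# attribute access, e.g.:
#
#     import transformers.models.ibert as ibert
#     model_cls = ibert.IBertModel  # only now does modeling_ibert actually get imported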
| 117
| 1
|
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Return the difference between predicted and actual output for an example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis h(x) = theta_0 + theta_1*x_1 + ... for one input."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Return the actual output of the example from the chosen data set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Return the hypothesis value of the example from the chosen data set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum the cost-derivative terms over the training set; index -1 is the bias term."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Return the averaged cost derivative with respect to parameter `index + 1`."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
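
# Vectorized one-step equivalent (a sketch, not part of the original file): with a
# design matrix X carrying a leading column of ones and targets y, the same batch
# update rule is theta -= LEARNING_RATE * X.T @ (X @ theta - y) / m.
if __name__ == "__main__":
    X = numpy.array([[1, *features] for features, _ in train_data], dtype=float)
    y = numpy.array([target for _, target in train_data], dtype=float)
    theta = numpy.array(parameter_vector, dtype=float)
    theta -= LEARNING_RATE * X.T @ (X @ theta - y) / m  # one gradient-descent step
    print(("One vectorized update:", theta))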
| 121
|
def solution(n: int = 1000) -> int:
    """Count, among the first `n` expansions of sqrt(2), those whose numerator has more digits than the denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(f'''{solution() = }''')
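
# Worked example (not in the original file): the recurrence yields the sqrt(2)
# convergents 3/2, 7/5, 17/12, 41/29, 99/70, 239/169, 577/408, 1393/985, ...; the
# eighth is the first whose numerator (4 digits) outgrows its denominator (3 digits),
# so solution(8) == 1.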
| 121
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """The output of UNet1DModel: a sample tensor of shape (batch, channels, length)."""

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: Tuple[str] = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4,
        )

    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True):
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
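
# Minimal forward-pass sketch (shapes are assumptions, not from the original file):
#
#     model = UNet1DModel(sample_size=256, in_channels=2, out_channels=2)
#     noise = torch.randn(1, 2, 256)          # (batch, channels, length)
#     out = model(noise, timestep=10).sample  # same shape as the input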
| 51
|
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """A circular queue implemented over a doubly linked list of fixed capacity."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
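
# Usage example (a sketch, not part of the original file):
#
#     queue = CircularQueueLinkedList(initial_capacity=3)
#     queue.enqueue("a")
#     queue.enqueue("b")
#     queue.first()    # -> "a"
#     queue.dequeue()  # -> "a"
#     queue.dequeue()  # -> "b"
#     queue.dequeue()  # raises Exception("Empty Queue")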
| 142
| 0
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
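
# Worked example (not in the original file): with the default conv_stride
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the property evaluates to
# 5 * 2**6 == 320, i.e. one encoder frame per 320 input samples.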
| 268
|
"""simple docstring"""
def mf_knapsack(i, wt, val, j):
    """Memoization-based (top-down) 0/1 knapsack using the global table `f`."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1, wt, val, j)
        else:
            val_ = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val_
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also reconstruct one optimal subset of item indices."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 268
| 1
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate serving server from provided command line arguments.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"],
                    ),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input and eventually returns corresponding tokens ids.
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """
        Detokenize the provided tokens ids to readable text.
        """
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """
        Forward the provided inputs through the pipeline.
        """
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
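
# Hypothetical client sketch (not part of the original file; assumes a server started
# locally via `transformers-cli serve --task feature-extraction --port 8888`):
#
#     import requests
#     r = requests.post(
#         "http://localhost:8888/tokenize",
#         json={"text_input": "Hello world", "return_ids": True},
#     )
#     print(r.json())  # {"tokens": [...], "tokens_ids": [...]}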
| 38
|
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint and rebuild the original WavLM model
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
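
    # Programmatic usage sketch (paths below are placeholders, not from the original file):
    #
    #     convert_wavlm_checkpoint(
    #         checkpoint_path="WavLM-Base.pt",
    #         pytorch_dump_folder_path="./wavlm-base-hf",
    #         config_path=None,  # falls back to the default WavLMConfig
    #     )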
| 38
| 1
|
"""simple docstring"""
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator so that both params appear in the sub-test name
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))


@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False,
        )

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True,
        )

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()
        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 353
|
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of the input data as a sorted list."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
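
    # Usage examples (a sketch, not part of the original file):
    #
    #     >>> mode([2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2])
    #     [2]
    #     >>> mode([1, 2, 2, 1])
    #     [1, 2]
    #     >>> mode([])
    #     []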
| 12
| 0
|
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def _lowercase ( __A ,__A ,__A ,__A ,__A ):
'''simple docstring'''
for attribute in key.split(""".""" ):
__UpperCamelCase = getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if weight_type is not None:
__UpperCamelCase = getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).shape
else:
__UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
__UpperCamelCase = value
elif weight_type == "weight_g":
__UpperCamelCase = value
elif weight_type == "weight_v":
__UpperCamelCase = value
elif weight_type == "bias":
__UpperCamelCase = value
else:
__UpperCamelCase = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowercase ( __A ,__A ):
'''simple docstring'''
__UpperCamelCase = []
__UpperCamelCase = fairseq_model.state_dict()
__UpperCamelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,hf_model.config.feat_extract_norm == """group""" ,)
__UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
__UpperCamelCase = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key):
# special case since naming is very similar
continue
__UpperCamelCase = True
if "*" in mapped_key:
__UpperCamelCase = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2]
__UpperCamelCase = mapped_key.replace("""*""" ,_SCREAMING_SNAKE_CASE )
if "weight_g" in name:
__UpperCamelCase = 'weight_g'
elif "weight_v" in name:
__UpperCamelCase = 'weight_v'
elif "bias" in name:
__UpperCamelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__UpperCamelCase = 'weight'
else:
__UpperCamelCase = None
set_recursively(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ''  # note: overrides the CLI argument, as in the original script
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])} )
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
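# Illustrative invocation of this conversion script; the paths below are
# hypothetical placeholders, not files shipped with the repository:
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path ./unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --config_path ./config.json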
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values' )
    return 6 * side_length**2
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values' )
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values' )
    return 4 * pi * radius**2
def surface_area_hemisphere(radius: float) -> float:
    # 2*pi*r^2 (curved surface) + pi*r^2 (flat base) = 3*pi*r^2
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
    return 3 * pi * radius**2
def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values' )
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values' )
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values' )
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori' )
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values' )
    return length * width
def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values' )
    return side_length**2
def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values' )
    return (base * height) / 2
def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('Given three sides do not form a triangle' )
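    # Heron's formula: with semi-perimeter s = (a + b + c) / 2, the area is
    # sqrt(s * (s - a) * (s - b) * (s - c)). For the 5-12-13 right triangle,
    # s = 15 and the area works out to sqrt(15 * 10 * 3 * 2) = 30.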
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area
def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values' )
    return base * height
def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values' )
    return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values' )
    return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values' )
    return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values' )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or equal to three as number of sides' )
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
""" , )
class FillMaskPipeline(Pipeline):
    """Masked language modeling prediction pipeline."""
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError('Unsupported framework' )
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                'fill-mask', self.model.base_model_prefix, f"No mask_token ({self.tokenizer.mask_token}) found on the input", )
    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['input_ids'][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)
    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs['input_ids'] = model_inputs['input_ids']
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs['input_ids'][0]
        outputs = model_outputs['logits']
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p]), 'sequence': sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target, add_special_tokens=False, return_attention_mask=False, return_token_type_ids=False, max_length=1, truncation=True, )['input_ids']
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        'We cannot replace it with anything meaningful, ignoring it' )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`." )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError('At least one target must be provided when passed.' )
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params['target_ids'] = target_ids
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                'fill-mask', self.model.base_model_prefix, 'The tokenizer does not define a `mask_token`.' )
        return {}, {}, postprocess_params
    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
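# Minimal usage sketch (assumes network access and a standard masked-LM
# checkpoint such as "bert-base-uncased"; not part of this module):
#
#   from transformers import pipeline
#   fill_mask = pipeline("fill-mask", model="bert-base-uncased")
#   print(fill_mask("Paris is the [MASK] of France.", top_k=3))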
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Polynomial multiplication via the fast Fourier transform."""
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root of unity used for the Fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
        # The product
        self.product = self.__multiply()
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    def __multiply(self):
        dft_a = self.__dft('A')
        dft_b = self.__dft('B')
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__(self) -> str:
        a = 'A = ' + ' + '.join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = 'B = ' + ' + '.join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = 'A*B = ' + ' + '.join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
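# Illustrative example: multiplying (1 + 2x) by (3 + 4x) should yield
# 3 + 10x + 8x^2, i.e. coefficients [3, 10, 8] (up to complex rounding):
#
#   print(FFT([1, 2], [3, 4]).product)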
deps = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """Return True if the string is a valid dotted-quad IPv4 address."""
    octets = [int(i) for i in ip_v4_address.split('.') if i.isdigit()]
    # each octet must be in the range 0-255
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
if __name__ == "__main__":
__A = input().strip()
__A = '''valid''' if is_ip_va_address_valid(ip) else '''invalid'''
print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields_ is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)  # -11 is STD_OUTPUT_HANDLE
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25l')
        sys.stdout.flush()
def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25h')
        sys.stdout.flush()
@contextmanager
def hide():
    """Context manager that hides the cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
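# Example usage of the context manager above (illustrative; run_long_task is a
# stand-in for any blocking call, not a function defined in this module):
#
#   with hide():
#       run_long_task()  # cursor hidden for the duration, restored afterwards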
"""Convert TrOCR checkpoints from the unilm repository."""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
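# Note: the fused timm qkv weight is laid out as [query; key; value] along
# dim 0, so the three equal slices above recover the separate projections.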
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg'  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = 'https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'
    im = Image.open(requests.get(url, stream=True).raw ).convert('RGB' )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights to our VisionEncoderDecoderModel structure.
    """
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL' )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = 'relu'
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu', check_hash=True)['model']
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('decoder' ) and "output_projection" not in key:
            state_dict['decoder.model.' + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict)
# Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained('roberta-large' )
    processor = TrOCRProcessor(image_processor, tokenizer)
    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors='pt' ).pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 50265] )
if "trocr-base-handwritten" in checkpoint_url:
_a = torch.tensor(
[-1.4_5_0_2, -4.6_6_8_3, -0.5_3_4_7, -2.9_2_9_1, 9.1_4_3_5, -3.0_5_7_1, 8.9_7_6_4, 1.7_5_6_0, 8.7_3_5_8, -1.5_3_1_1] )
elif "trocr-large-handwritten" in checkpoint_url:
_a = torch.tensor(
[-2.6_4_3_7, -1.3_1_2_9, -2.2_5_9_6, -5.3_4_5_5, 6.3_5_3_9, 1.7_6_0_4, 5.4_9_9_1, 1.4_7_0_2, 5.6_1_1_3, 2.0_1_7_0] )
elif "trocr-base-printed" in checkpoint_url:
_a = torch.tensor(
[-5.6_8_1_6, -5.8_3_8_8, 1.1_3_9_8, -6.9_0_3_4, 6.8_5_0_5, -2.4_3_9_3, 1.2_2_8_4, -1.0_2_3_2, -1.9_6_6_1, -3.9_2_1_0] )
elif "trocr-large-printed" in checkpoint_url:
_a = torch.tensor(
[-6.0_1_6_2, -7.0_9_5_9, 4.4_1_5_5, -5.1_0_6_3, 7.0_4_6_8, -3.1_6_3_1, 2.6_4_6_6, -0.3_0_8_1, -0.8_1_0_6, -1.7_5_3_5] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , lowerCAmelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
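# Illustrative invocation (the handwritten checkpoint URL above is the default
# when --checkpoint_url is omitted; the dump folder is a hypothetical path):
#   python convert_trocr_checkpoint.py --pytorch_dump_folder_path ./trocr-base-handwritten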
"""Marian model configuration."""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)] , dim=1 )
            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers, max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            batch, seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)] , dim=1 )
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers )
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
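# Sketch of how such an ONNX config is typically consumed (method names follow
# the transformers ONNX export API of this era; tokenizer is assumed to be a
# loaded MarianTokenizer, not something defined in this module):
#
#   config = MarianConfig()
#   onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)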
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger()
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('''levit_128s''', pretrained=True )
            else:
                from_model = timm.create_model('''levit_128''', pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model('''levit_192''', pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model('''levit_256''', pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model('''levit_384''', pretrained=True )
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config ).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys() )
        new_keys = list(our_model.state_dict().keys() )
        print(len(og_keys ), len(new_keys ) )
        for i in range(len(og_keys ) ):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights )
        x = torch.randn((2, 3, 224, 224) )
        out1 = from_model(x )
        out2 = our_model(x ).logits
        assert torch.allclose(out1, out2 ), "The model logits don't match the original one."
        checkpoint_name = name
        print(checkpoint_name )
        if push_to_hub:
            our_model.save_pretrained(save_directory / checkpoint_name )
            image_processor = LevitImageProcessor()
            image_processor.save_pretrained(save_directory / checkpoint_name )
            print(F'''Pushed {checkpoint_name}''' )
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id )
    names_to_hidden_sizes = {
        '''levit-128S''': 128,
        '''levit-128''': 128,
        '''levit-192''': 192,
        '''levit-256''': 256,
        '''levit-384''': 384,
    }
    names_to_config = {
        '''levit-128S''': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        '''levit-128''': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        '''levit-192''': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        '''levit-256''': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        '''levit-384''': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
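# Illustrative invocation (converts a single size; omit --model_name to convert all):
#   python convert_levit_checkpoint.py --model_name levit-128S --pytorch_dump_folder_path levit-dump-folder/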
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb" ) as f:
        im = Image.open(f )
        return im.convert("RGB" )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    dataset_name: Optional[str] = field(
        default=None, metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        } , )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."} )
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."} )
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."} )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."} )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    labels = torch.tensor([example["labels"] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task="image-classification", use_auth_token=True if model_args.use_auth_token else None, )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**" )
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**" )
        dataset = load_dataset(
            "imagefolder", data_files=data_files, cache_dir=model_args.cache_dir, task="image-classification", )
# If we don't have a validation split, split off a percentage of train as validation.
a = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split, A ) and data_args.train_val_split > 0.0:
a = dataset["train"].train_test_split(data_args.train_val_split )
a = split["train"]
a = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
a = dataset["train"].features["labels"].names
a , a = {}, {}
for i, label in enumerate(A ):
a = str(A )
a = label
# Load the accuracy metric from the datasets package
a = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1 ), references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels ), label2id=label2id, id2label=id2label, finetuning_task="image-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
a = image_processor.size["shortest_edge"]
else:
a = (image_processor.size["height"], image_processor.size["width"])
a = Normalize(mean=image_processor.image_mean, std=image_processor.image_std )
a = Compose(
[
RandomResizedCrop(A ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
a = Compose(
[
Resize(A ),
CenterCrop(A ),
ToTensor(),
normalize,
] )
def train_transforms(example_batch):
    """Apply _train_transforms across a batch."""
    example_batch["pixel_values"] = [
        _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
    ]
    return example_batch

def val_transforms(example_batch):
    """Apply _val_transforms across a batch."""
    example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
    return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
    if data_args.max_train_samples is not None:
        dataset["train"] = (
            dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        )
    # Set the training transforms
    dataset["train"].set_transform(train_transforms)
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
    if data_args.max_eval_samples is not None:
        dataset["validation"] = (
            dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
        )
    # Set the validation transforms
    dataset["validation"].set_transform(val_transforms)
# Initialize our trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=dataset["train"] if training_args.do_train else None,
    eval_dataset=dataset["validation"] if training_args.do_eval else None,
    compute_metrics=compute_metrics,
    tokenizer=image_processor,
    data_collator=collate_fn,
)
# Training
if training_args.do_train:
    checkpoint = None
    if training_args.resume_from_checkpoint is not None:
        checkpoint = training_args.resume_from_checkpoint
    elif last_checkpoint is not None:
        checkpoint = last_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
    trainer.save_model()
    trainer.log_metrics("train", train_result.metrics)
    trainer.save_metrics("train", train_result.metrics)
    trainer.save_state()
# Evaluation
if training_args.do_eval:
    metrics = trainer.evaluate()
    trainer.log_metrics("eval", metrics)
    trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
kwargs = {
    "finetuned_from": model_args.model_name_or_path,
    "tasks": "image-classification",
    "dataset": data_args.dataset_name,
    "tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
    trainer.push_to_hub(**kwargs)
else:
    trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
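# Minimal usage sketch (illustrative values, not part of the original file):
# ids_tensor((2, 5), vocab_size=100) -> np.ndarray of shape (2, 5) with ids in [0, 99]
# random_attention_mask((2, 5)) -> a 0/1 mask of shape (2, 5) whose last column is all 1s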
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
from __future__ import annotations
def average(nums: list[int | float]) -> float:
    """Return the arithmetic mean of the numbers in ``nums``."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
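# Minimal usage sketch (illustrative values, not part of the original file):
# average([1, 2, 3]) -> 2.0; average([]) raises ValueError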
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )
    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
def SCREAMING_SNAKE_CASE__ ( self , a="./") -> List[Any]:
SCREAMING_SNAKE_CASE = self.eval_dataset
SCREAMING_SNAKE_CASE = self.get_eval_dataloader(a)
SCREAMING_SNAKE_CASE = next(iter(a))
# saving device - to make it consistent
SCREAMING_SNAKE_CASE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# convert to tuple
SCREAMING_SNAKE_CASE = tuple(v.to(a) for k, v in batch.items())
logger.info('Converting model to be onnx compatible')
from pytorch_quantization.nn import TensorQuantizer
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = self.model.to(a)
model.eval()
model.float()
SCREAMING_SNAKE_CASE = model.module if hasattr(a , 'module') else model
quant_trainer.configure_model(a , self.quant_trainer_args)
SCREAMING_SNAKE_CASE = os.path.join(a , 'model.onnx')
logger.info(f'''exporting model to {output_model_file}''')
SCREAMING_SNAKE_CASE = {0: 'batch_size', 1: 'seq_len'}
torch.onnx.export(
a , a , a , export_params=a , opset_version=13 , do_constant_folding=a , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
'input_ids': axes,
'attention_mask': axes,
'token_type_ids': axes,
'output_start_logits': axes,
'output_end_logits': axes,
} , verbose=a , )
logger.info('onnx export finished')
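        # Minimal usage sketch (illustrative, not part of the original file): the
        # exported file can then be loaded with onnxruntime, e.g.
        #   import onnxruntime
        #   session = onnxruntime.InferenceSession(output_model_file)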
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility, forward everything to the current processor
        # when used inside the (deprecated) `as_target_processor` context manager.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
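# Minimal usage sketch (illustrative, not part of the original file):
# processor = Speech2TextProcessor(feature_extractor, tokenizer)
# inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
# labels = processor(text="a transcript").input_ids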
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack implemented on top of a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
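# Minimal usage sketch (illustrative values, not part of the original file):
# stack = LinkedStack[int](); stack.push(1); stack.push(2)
# str(stack) -> "2->1"; stack.pop() -> 2; stack.peek() -> 1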
if __name__ == "__main__":
from doctest import testmod
testmod()
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
    @slow
    def test_tokenizer_integration(self):
# fmt: off
snake_case__ : Tuple = {'''input_ids''': [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case__,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
__lowerCAmelCase : Union[str, Any] = """facebook/m2m100_418M"""
__lowerCAmelCase : Union[str, Any] = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
__lowerCAmelCase : Optional[Any] = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
__lowerCAmelCase : Dict = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_save_and_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
# en_XX, A, test, EOS
'''input_ids''': [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 1_2_8_0_0_6,
} ,)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square (integer sqrt avoids float-precision issues)."""
    return math.isqrt(num) ** 2 == num
def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
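# Minimal usage sketch (illustrative values, not part of the original file):
# perfect_square(36) -> True; perfect_square_binary_search(35) -> False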
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) through C(upper_limit), computed bottom-up."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
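# Minimal usage sketch (illustrative values, not part of the original file):
# catalan_numbers(5) -> [1, 1, 2, 5, 14, 42]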
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir: str, src_lang: str, tgt_lang: str) -> None:
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
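# Minimal usage sketch (illustrative values, not part of the original file):
# with WORLD_SIZE=4 set in the environment, get_int_from_env(["WORLD_SIZE"], 1) -> 4
# with DEBUG=yes set, parse_flag_from_env("DEBUG") -> True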
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string to an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    """Generate the shortest Roman numeral string for an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
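# Minimal usage sketch (illustrative values, not part of the original file):
# parse_roman_numerals("MCMXCIV") -> 1994; generate_roman_numerals(1994) -> "MCMXCIV"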
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Count the characters saved by rewriting each Roman numeral in minimal form."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings
if __name__ == "__main__":
print(f'{solution() = }')
'''simple docstring'''
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of the arc spanning ``angle`` degrees on a circle of the given ``radius``."""
    return 2 * pi * radius * (angle / 360)
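# Minimal usage sketch (illustrative values, not part of the original file):
# arc_length(90, 10) -> 15.707963267948966 (a quarter of a circle of radius 10)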
if __name__ == "__main__":
print(arc_length(90, 10))
'''simple docstring'''
import functools
def edit_distance(word1: str, word2: str) -> int:
    """Return the Levenshtein (edit) distance between ``word1`` and ``word2``."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index overflows - delete all remaining of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index overflows - delete all remaining of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
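# Minimal usage sketch (illustrative values, not part of the original file):
# edit_distance("kitten", "sitting") -> 3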
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" RoFormer tokenizer backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
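# Minimal usage sketch (illustrative, not part of the original file):
# tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
# input_ids = tokenizer("今天天气非常好。")["input_ids"]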
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImgaImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
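
# A quick sanity sketch (not part of the original script): a single HF Diffusers
# key is renamed to its SD counterpart while the tensor is carried over unchanged.
def _demo_unet_key_mapping():
    dummy = {"time_embedding.linear_1.weight": torch.zeros(4)}
    converted = convert_unet_state_dict(dummy)
    assert list(converted) == ["time_embed.0.weight"]
    return converted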
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
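
# Why the reshape above is needed (illustrative, not part of the original script):
# HF Diffusers stores the VAE mid-block attention projections as Linear weights of
# shape [C, C], while original SD checkpoints store them as 1x1 Conv2d weights of
# shape [C, C, 1, 1].
def _demo_reshape_for_sd():
    w_linear = torch.ones(4, 4)               # HF layout
    w_conv = reshape_weight_for_sd(w_linear)  # SD layout
    assert w_conv.shape == (4, 4, 1, 1)
    return w_conv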
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
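
# The v2 (OpenCLIP) text encoder keeps q/k/v as a single stacked in_proj tensor,
# which is why the converter above concatenates the three captured matrices.
# A minimal sketch of that layout (sizes are illustrative):
def _demo_qkv_concat():
    q, k, v = torch.full((2, 2), 1.0), torch.full((2, 2), 2.0), torch.full((2, 2), 3.0)
    in_proj = torch.cat([q, k, v])  # shape (6, 2): rows 0-1 = q, 2-3 = k, 4-5 = v
    assert in_proj.shape == (6, 2)
    return in_proj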
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
UpperCamelCase = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
UpperCamelCase = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
UpperCamelCase = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
UpperCamelCase = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
UpperCamelCase = load_file(unet_path, device='cpu')
else:
UpperCamelCase = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
UpperCamelCase = torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
UpperCamelCase = load_file(vae_path, device='cpu')
else:
UpperCamelCase = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
UpperCamelCase = torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
UpperCamelCase = load_file(text_enc_path, device='cpu')
else:
UpperCamelCase = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
UpperCamelCase = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
UpperCamelCase = convert_unet_state_dict(unet_state_dict)
UpperCamelCase = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
UpperCamelCase = convert_vae_state_dict(vae_state_dict)
UpperCamelCase = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
UpperCamelCase = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
UpperCamelCase = {'transformer.' + k: v for k, v in text_enc_dict.items()}
UpperCamelCase = convert_text_enc_state_dict_vaa(text_enc_dict)
UpperCamelCase = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
UpperCamelCase = convert_text_enc_state_dict(text_enc_dict)
UpperCamelCase = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
UpperCamelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
UpperCamelCase = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
UpperCamelCase = {'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
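
# Typical invocation (paths are illustrative):
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model --checkpoint_path ./model.safetensors --use_safetensors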
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
'''simple docstring'''
def solution(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
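    # Worked example: for n = 10 the square of the sum is 55**2 = 3025 and the
    # sum of the squares is 385, so solution(10) returns 2640.
    print(f"{solution(10) = }")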
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[ChannelDimension] = None) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
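
# A minimal usage sketch (the all-zeros input is illustrative); heights and widths
# are padded up to the next multiple of `pad_size`, so a 20x20 image becomes 24x24:
if __name__ == "__main__":
    processor = Swin2SRImageProcessor()
    batch = processor(images=np.zeros((3, 20, 20), dtype=np.float32), return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 24, 24)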
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """Implements a batch, differentiable, standard pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
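
# A minimal sketch: 20 cameras orbiting the origin; each ray bundle packs an
# (origin, direction) pair per pixel, hence the trailing (2, 3) dimensions.
if __name__ == "__main__":
    cameras = create_pan_cameras(8)
    print(cameras.camera_rays.shape)  # torch.Size([1, 1280, 2, 3]) = (1, 20 * 8 * 8, 2, 3)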
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , __UpperCamelCase , __UpperCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase = image_processor.size["""shortest_edge"""]
else:
UpperCamelCase = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCamelCase = Compose(
[
Lambda(lambda __UpperCamelCase : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(__UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__UpperCamelCase ):
UpperCamelCase = [transforms(__UpperCamelCase ) for image in examples[image_column_name]]
return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
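
# Typical launch (arguments are illustrative):
#   python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo \
#       --do_train --do_eval --base_learning_rate 1.5e-4 --mask_ratio 0.75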
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
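    # Worked example: 25 = 0b11001 and 32 = 0b100000, so the bitwise OR
    # is 0b111001 (= 57).
    print(binary_or(25, 32))  # 0b111001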
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __lowerCamelCase ( lowerCamelCase__="" ):
"""simple docstring"""
lowercase__ : Dict = tempfile.mkdtemp()
return os.path.join(lowerCamelCase__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Tuple ):
lowercase__ : Optional[Any] = torch.rand(12 , dtype=torch.floataa ) - 0.5
lowercase__ : List[str] = AgentAudio(SCREAMING_SNAKE_CASE )
lowercase__ : str = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE ) )
# Ensure that the file contains the same value as the original tensor
lowercase__ , lowercase__ : Optional[int] = sf.read(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , torch.tensor(SCREAMING_SNAKE_CASE ) , atol=1E-4 ) )
def snake_case ( self : int ):
lowercase__ : int = torch.rand(12 , dtype=torch.floataa ) - 0.5
lowercase__ : Dict = get_new_path(suffix=".wav" )
sf.write(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 16_000 )
lowercase__ : Optional[int] = AgentAudio(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , SCREAMING_SNAKE_CASE )
@require_vision
@require_torch
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : str ):
lowercase__ : Optional[Any] = torch.randint(0 , 256 , (64, 64, 3) )
lowercase__ : List[Any] = AgentImage(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE ) )
def snake_case ( self : int ):
lowercase__ : Dict = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
lowercase__ : Union[str, Any] = Image.open(SCREAMING_SNAKE_CASE )
lowercase__ : str = AgentImage(SCREAMING_SNAKE_CASE )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE ) )
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
lowercase__ : List[str] = Image.open(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = AgentImage(SCREAMING_SNAKE_CASE )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE ) )
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : str ):
lowercase__ : Dict = "Hey!"
lowercase__ : Any = AgentText(SCREAMING_SNAKE_CASE )
self.assertEqual(SCREAMING_SNAKE_CASE , agent_type.to_string() )
self.assertEqual(SCREAMING_SNAKE_CASE , agent_type.to_raw() )
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
"""simple docstring"""
class SubArray:
    def __init__(self, arr):
        # we need a list, not a string, so change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
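
    # Worked example (Kadane-style DP): for "1,-2,3,4,-1" the best contiguous
    # slice is 3 + 4, so solve_sub_array() returns 7.
    assert SubArray("1,-2,3,4,-1").solve_sub_array() == 7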
"""simple docstring"""
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
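
# With the lazy module installed in sys.modules, importing this package stays cheap:
# the config/tokenizer names resolve immediately, while torch-backed classes such
# as JukeboxModel are only materialized on first attribute access, e.g. (illustrative):
#
#     from transformers.models.jukebox import JukeboxConfig  # no torch import yet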
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
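
# A minimal round-trip sketch (embedding_dim kept small for illustration): with the
# default zero mean and unit std, scale() and unscale() are exact inverses.
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
    embeds = torch.randn(2, 4)
    assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)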
"""simple docstring"""
class SubArray:
    def __init__(self, arr):
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(contents):
        return tok(contents, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
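
# Typical invocation (paths are illustrative):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 512 \
#       --data_dir ./cnn_dm --save_path ./cnn_dm_packed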
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDPMScheduler = 1
    FlaxDDIMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    """simple docstring"""
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
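# Minimal sketch of the helper above: a (batch,)-shaped vector of per-timestep
# scalars gains trailing singleton axes so it broadcasts against sample-shaped
# arrays such as (batch, height, width, channels).
def _broadcast_demo():
    coeffs = jnp.arange(4.0)  # shape (4,)
    target_shape = (4, 8, 8, 3)
    assert broadcast_to_shape_from_left(coeffs, target_shape).shape == target_shape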
def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """simple docstring"""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
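# Quick sanity sketch for the cosine ("squaredcos_cap_v2") schedule above:
# every beta_t = 1 - alpha_bar(t2) / alpha_bar(t1) is positive and capped at
# max_beta (0.999 by default).
def _betas_demo():
    betas = betas_for_alpha_bar(1000)
    assert betas.shape == (1000,)
    assert float(betas.min()) > 0.0 and float(betas.max()) <= 0.999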
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    """simple docstring"""
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    """simple docstring"""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """simple docstring"""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
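# Sketch of the q(x_t | x_0) helpers above: noisy = sqrt(abar_t) * x0
# + sqrt(1 - abar_t) * eps, with both coefficients broadcast from (batch,) to
# the full sample shape. `scheduler` is assumed to be any Flax scheduler whose
# config carries the beta-schedule fields used by CommonSchedulerState.create.
def _forward_process_demo(scheduler):
    state = CommonSchedulerState.create(scheduler)
    x0 = jnp.ones((2, 4))
    eps = jnp.zeros_like(x0)
    timesteps = jnp.array([0, 10])
    noisy = add_noise_common(state, x0, eps, timesteps)
    assert noisy.shape == x0.shape  # with eps == 0, noisy is just sqrt(abar_t) * x0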
| 322
| 1
|
'''simple docstring'''
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store one combination at a time
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
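# The driver above prints C(5, 3) = 10 combinations; the first few output
# lines are "10 20 30", "10 20 40", "10 20 50", "10 30 40", ...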
| 358
|
'''simple docstring'''
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
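# Usage sketch for the union-by-rank structure above: three sets of sizes
# 1, 1 and 2, so max_set starts at 2 and grows as sets are merged.
def _disjoint_set_demo():
    ds = DisjointSet([1, 1, 2])
    assert ds.merge(0, 1)       # sizes become {2, 2}, max_set stays 2
    assert ds.max_set == 2
    assert ds.merge(0, 2)       # everything joined into one set of size 4
    assert ds.max_set == 4
    assert not ds.merge(1, 2)   # already in the same set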
| 21
| 0
|
'''simple docstring'''
import math
class SelfOrganizingMap:
    """simple docstring"""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        # Compute the winning (closest) weight vector by squared Euclidean distance
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        # Move the winning weight vector toward the sample
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| 63
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
A : Tuple = get_tests_dir("fixtures")
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE_ = mock.Mock()
SCREAMING_SNAKE_CASE_ = 500
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = HTTPError
SCREAMING_SNAKE_CASE_ = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__magic_name__ ) as mock_head:
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This check we did call the fake head request
mock_head.assert_called()
def __A ( self : Optional[int] ) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 118
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    '''simple docstring'''

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    '''simple docstring'''

    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    '''simple docstring'''

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    '''simple docstring'''

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    '''simple docstring'''

    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ):
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    '''simple docstring'''

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r'\n    Parameters:\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UPERNET_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n            `attentions` under returned tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n            returned tensors for more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    '''simple docstring'''

    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
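# Usage sketch (mirrors the transformers docs for this model): pixel_values of
# shape (batch, 3, H, W) produce logits of shape (batch, num_labels, H, W)
# thanks to the bilinear upsampling in forward above.
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   outputs = model(**processor(images=image, return_tensors="pt"))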
| 177
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_lowerCamelCase = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 177
| 1
|
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    '''simple docstring'''

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
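# Usage sketch (hypothetical helper objects from a QA example script): the
# subclass behaves like Trainer.evaluate/predict, except the raw start/end
# logits are first turned into answer strings by post_process_function and
# only then scored by compute_metrics.
#   trainer = QuestionAnsweringTrainer(
#       model=model, args=training_args, train_dataset=train_dataset,
#       eval_dataset=eval_dataset, eval_examples=eval_examples,
#       post_process_function=post_processing_function, compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate()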
| 66
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format=None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            # scale pixel values, e.g. uint8 [0, 255] -> float [0, 1]
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            # symmetric-pad each side up to the next multiple of pad_size
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
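# Usage sketch (numpy only): a 13 x 13 HWC image is rescaled to [0, 1] and
# padded up to the next multiple of pad_size = 8, here 16 x 16
# (pad_height = (13 // 8 + 1) * 8 - 13 = 3), then moved to channels-first.
def _image_processor_demo():
    processor = Swin2SRImageProcessor(do_rescale=True, do_pad=True, pad_size=8)
    image = np.zeros((13, 13, 3), dtype=np.uint8)
    batch = processor.preprocess(image)
    assert batch["pixel_values"][0].shape == (3, 16, 16)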
| 248
| 0
|
from __future__ import annotations
class IIRFilter:
    """simple docstring"""

    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
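# Usage sketch: y[n] = 0.5 * x[n] + 0.5 * x[n-1], i.e. a_coeffs = [1.0, 0.0]
# and b_coeffs = [0.5, 0.5]; a unit step settles to 1.0 after one sample.
def _iir_demo():
    filt = IIRFilter(1)
    filt.set_coefficients([1.0, 0.0], [0.5, 0.5])
    outputs = [filt.process(1.0) for _ in range(4)]
    assert outputs == [0.5, 1.0, 1.0, 1.0]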
| 292
|
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    'word_embeddings_layernorm.weight',
    'word_embeddings_layernorm.bias',
    'input_layernorm.weight',
    'input_layernorm.bias',
    'post_attention_layernorm.weight',
    'post_attention_layernorm.bias',
    'self_attention.dense.bias',
    'mlp.dense_4h_to_h.bias',
    'ln_f.weight',
    'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    'mlp.dense_4h_to_h.weight',
    'self_attention.dense.weight',
]
def layer_name_mapping(key, file):
    # Handle first and last layers
    layer_rename_map = {
        'word_embeddings.weight': 'word_embeddings.weight',
        'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
        'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
        'weight': 'ln_f.weight',
        'bias': 'ln_f.bias',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(R'.*layer_(\d*).*', file)[1])
    layer_number -= 3
    return F'''h.{layer_number}.''' + key


def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(R'[^\d](\d+)$', str(dtype))
    if bit_search is None:
        raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''')
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
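# For example, get_dtype_size(torch.float32) == 4 and
# get_dtype_size(torch.float16) == 2, while torch.bool is special-cased to
# 1/8 byte per element.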
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith('layer') and "model_00" in s, file_names))

        index_dict = {'weight_map': {}, 'metadata': {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print('Processing file: {}'.format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace('model_00', F'''model_0{i}''')
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location='cpu')

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    'pytorch_model_{}-of-{}.bin'.format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = 'pytorch_model_{}-of-{}.bin'.format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + '.index.json'), 'w', encoding='utf-8') as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + '\n'
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith('layer') and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace('model_00', F'''model_0{i}''')
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location='cpu')

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected'''
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, F'''The keys {missing_keys} are missing'''

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
        print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''')
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(F'''Save configuration file to {pytorch_config_dump_path}''')
        with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
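# Usage sketch (hypothetical paths):
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_deepspeed_ckpt \
#       --pytorch_dump_folder_path ./bloom-hf \
#       --shard_model --pretraining_tp 4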
| 292
| 1
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    """simple docstring"""
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    """simple docstring"""

    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser('env')
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""",
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }

        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
| 90
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        """simple docstring"""
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift='''default''',
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        """simple docstring"""
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version('''>=''', '''1.11.0'''):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class _a ( nn.Module ):
def __init__( self: Dict , UpperCamelCase_: int=3 , UpperCamelCase_: Optional[Any]=3 , UpperCamelCase_: int=("UpDecoderBlock2D",) , UpperCamelCase_: Any=(64,) , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: Optional[Any]="silu" , UpperCamelCase_: Dict="group" , ) -> Any:
"""simple docstring"""
super().__init__()
lowercase__ = layers_per_block
lowercase__ = nn.Convad(
UpperCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ = None
lowercase__ = nn.ModuleList([] )
lowercase__ = in_channels if norm_type == '''spatial''' else None
# mid
lowercase__ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase_ , temb_channels=UpperCamelCase_ , )
# up
lowercase__ = list(reversed(UpperCamelCase_ ) )
lowercase__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase_ ):
lowercase__ = output_channel
lowercase__ = reversed_block_out_channels[i]
lowercase__ = i == len(UpperCamelCase_ ) - 1
lowercase__ = get_up_block(
UpperCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase_ , out_channels=UpperCamelCase_ , prev_output_channel=UpperCamelCase_ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase_ , resnet_groups=UpperCamelCase_ , attention_head_dim=UpperCamelCase_ , temb_channels=UpperCamelCase_ , resnet_time_scale_shift=UpperCamelCase_ , )
self.up_blocks.append(UpperCamelCase_ )
lowercase__ = output_channel
# out
if norm_type == "spatial":
lowercase__ = SpatialNorm(block_out_channels[0] , UpperCamelCase_ )
else:
lowercase__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase_ , eps=1E-6 )
lowercase__ = nn.SiLU()
lowercase__ = nn.Convad(block_out_channels[0] , UpperCamelCase_ , 3 , padding=1 )
lowercase__ = False
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str]=None ) -> int:
"""simple docstring"""
lowercase__ = z
lowercase__ = self.conv_in(UpperCamelCase_ )
lowercase__ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
            def create_custom_forward(module: nn.Module ):
                def custom_forward(*inputs ):
                    return module(*inputs )
                return custom_forward
if is_torch_version('''>=''' , '''1.11.0''' ):
# middle
lowercase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase_ , UpperCamelCase_ , use_reentrant=UpperCamelCase_ )
lowercase__ = sample.to(UpperCamelCase_ )
# up
for up_block in self.up_blocks:
lowercase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ , use_reentrant=UpperCamelCase_ )
else:
# middle
lowercase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = sample.to(UpperCamelCase_ )
# up
for up_block in self.up_blocks:
lowercase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ )
else:
# middle
lowercase__ = self.mid_block(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = sample.to(UpperCamelCase_ )
# up
for up_block in self.up_blocks:
lowercase__ = up_block(UpperCamelCase_ , UpperCamelCase_ )
# post-process
if latent_embeds is None:
lowercase__ = self.conv_norm_out(UpperCamelCase_ )
else:
lowercase__ = self.conv_norm_out(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.conv_act(UpperCamelCase_ )
lowercase__ = self.conv_out(UpperCamelCase_ )
return sample
class _a ( nn.Module ):
def __init__( self: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: List[str]="random" , UpperCamelCase_: Union[str, Any]=False , UpperCamelCase_: Dict=True ) -> List[Any]:
"""simple docstring"""
super().__init__()
lowercase__ = n_e
lowercase__ = vq_embed_dim
lowercase__ = beta
lowercase__ = legacy
lowercase__ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowercase__ = remap
if self.remap is not None:
self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) )
lowercase__ = self.used.shape[0]
lowercase__ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowercase__ = self.re_embed
lowercase__ = self.re_embed + 1
print(
f'Remapping {self.n_e} indices to {self.re_embed} indices. '
f'Using {self.unknown_index} for unknown indices.' )
else:
lowercase__ = n_e
lowercase__ = sane_index_shape
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Any ) -> Dict:
"""simple docstring"""
lowercase__ = inds.shape
assert len(UpperCamelCase_ ) > 1
lowercase__ = inds.reshape(ishape[0] , -1 )
lowercase__ = self.used.to(UpperCamelCase_ )
lowercase__ = (inds[:, :, None] == used[None, None, ...]).long()
lowercase__ = match.argmax(-1 )
lowercase__ = match.sum(2 ) < 1
if self.unknown_index == "random":
lowercase__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowercase__ = self.unknown_index
return new.reshape(UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple ) -> str:
"""simple docstring"""
lowercase__ = inds.shape
assert len(UpperCamelCase_ ) > 1
lowercase__ = inds.reshape(ishape[0] , -1 )
lowercase__ = self.used.to(UpperCamelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
lowercase__ = 0 # simply set to zero
lowercase__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase_ )
return back.reshape(UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: Any ) -> Tuple:
"""simple docstring"""
lowercase__ = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowercase__ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowercase__ = torch.argmin(torch.cdist(UpperCamelCase_ , self.embedding.weight ) , dim=1 )
lowercase__ = self.embedding(UpperCamelCase_ ).view(z.shape )
lowercase__ = None
lowercase__ = None
# compute loss for embedding
if not self.legacy:
lowercase__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowercase__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowercase__ = z + (z_q - z).detach()
# reshape back to match original input shape
lowercase__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowercase__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowercase__ = self.remap_to_used(UpperCamelCase_ )
lowercase__ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowercase__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: List[Any] ) -> List[str]:
"""simple docstring"""
if self.remap is not None:
lowercase__ = indices.reshape(shape[0] , -1 ) # add batch axis
lowercase__ = self.unmap_to_all(UpperCamelCase_ )
lowercase__ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowercase__ = self.embedding(UpperCamelCase_ )
if shape is not None:
lowercase__ = z_q.view(UpperCamelCase_ )
# reshape back to match original input shape
lowercase__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
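# Editorial note (hedged sketch): the quantizer above maps each latent vector
# to its nearest codebook entry with torch.cdist + argmin and passes gradients
# straight through the non-differentiable lookup. Codebook size and latent
# dimension below are assumptions for illustration only.
def _vector_quantization_demo():
    import torch

    codebook = torch.nn.Embedding(8, 4)        # n_e = 8 codes of dimension 4
    z = torch.randn(2, 4, requires_grad=True)  # two flattened latent vectors
    indices = torch.argmin(torch.cdist(z, codebook.weight), dim=1)
    z_q = codebook(indices)
    # Straight-through estimator: forward uses z_q, backward flows into z.
    z_q = z + (z_q - z).detach()
    return z_q, indices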
class _a ( UpperCamelCase__ ):
def __init__( self: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=False ) -> str:
"""simple docstring"""
lowercase__ = parameters
lowercase__ , lowercase__ = torch.chunk(UpperCamelCase_ , 2 , dim=1 )
lowercase__ = torch.clamp(self.logvar , -30.0 , 20.0 )
lowercase__ = deterministic
lowercase__ = torch.exp(0.5 * self.logvar )
lowercase__ = torch.exp(self.logvar )
if self.deterministic:
lowercase__ = lowercase__ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Optional[torch.Generator] = None ) -> torch.FloatTensor:
"""simple docstring"""
lowercase__ = randn_tensor(
self.mean.shape , generator=UpperCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
lowercase__ = self.mean + self.std * sample
return x
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: str=None ) -> Tuple:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase_ ( self: int , UpperCamelCase_: Any , UpperCamelCase_: Any=[1, 2, 3] ) -> Tuple:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
lowercase__ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> Any:
"""simple docstring"""
return self.mean
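# Editorial note (hedged sketch): kl() above with other=None implements the
# closed form KL(N(mu, sigma^2) || N(0, I)) = 0.5 * sum(mu^2 + sigma^2 - 1 -
# log sigma^2) over the non-batch dimensions. The shapes below are assumptions.
def _diagonal_gaussian_kl_demo():
    import torch

    mean = torch.zeros(1, 2, 4, 4)
    logvar = torch.zeros(1, 2, 4, 4)
    kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])
    return kl  # zero, since the distribution is already standard normal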
| 110
| 0
|
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
__snake_case =0B1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
__snake_case =[int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Union[str, Any] ) -> Dict:
lowerCAmelCase = WATERMARK_BITS
lowerCAmelCase = WatermarkEncoder()
self.encoder.set_watermark('bits' , self.watermark )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : str ) -> str:
if images.shape[-1] < 2_5_6:
return images
lowerCAmelCase = (2_5_5 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase = [self.encoder.encode(_A , 'dwtDct' ) for image in images]
lowerCAmelCase = torch.from_numpy(np.array(_A ) ).permute(0 , 3 , 1 , 2 )
lowerCAmelCase = torch.clamp(2 * (images / 2_5_5 - 0.5) , min=-1.0 , max=1.0 )
return images
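# Editorial note (hedged sketch): the WATERMARK_BITS list above comes from
# bin(x)[2:], which renders the constant as a '0'/'1' string that int() maps
# back to integer bits. A tiny assumed example:
_demo_bits = [int(bit) for bit in bin(0b1011)[2:]]  # -> [1, 0, 1, 1]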
| 363
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : Tuple ) -> Tuple:
# test for the above condition
self.test()
def __UpperCAmelCase ( self : Any ) -> Tuple:
lowerCAmelCase = 0
lowerCAmelCase = False
while not completed:
if counter == 1:
self.reset()
lowerCAmelCase = self.advance()
if not self.does_advance(UpperCAmelCase__ ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.update(UpperCAmelCase__ )
counter += 1
if counter > 1_0_0_0_0:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def __UpperCAmelCase ( self : Dict ) -> Dict:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : int ) -> Dict:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : int ) -> Tuple:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCAmelCase ( self : Any ) -> Tuple:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=False ) -> Union[str, Any]:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : str , UpperCAmelCase__ : List[int] ) -> Union[str, Any]:
super(UpperCAmelCase__ , self ).__init__()
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or len(UpperCAmelCase__ ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
lowerCAmelCase = token_ids
lowerCAmelCase = len(self.token_ids )
lowerCAmelCase = -1 # the index of the currently fulfilled step
lowerCAmelCase = False
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : int ) -> int:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : int ) -> List[str]:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
if self.does_advance(UpperCAmelCase__ ):
self.fulfilled_idx += 1
lowerCAmelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
lowerCAmelCase = True
lowerCAmelCase = completed
else:
# failed to make progress.
lowerCAmelCase = True
self.reset()
return stepped, completed, reset
def __UpperCAmelCase ( self : int ) -> List[str]:
lowerCAmelCase = False
lowerCAmelCase = 0
def __UpperCAmelCase ( self : Dict ) -> List[str]:
return self.seqlen - (self.fulfilled_idx + 1)
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : List[str]=False ) -> Optional[int]:
lowerCAmelCase = PhrasalConstraint(self.token_ids )
if stateful:
lowerCAmelCase = self.seqlen
lowerCAmelCase = self.fulfilled_idx
lowerCAmelCase = self.completed
return new_constraint
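# Editorial note (hedged sketch): the phrasal constraint above advances through
# its token ids one step per generated token and loses all progress on a
# mismatch. A simplified re-implementation of that bookkeeping; the token ids
# below are assumptions and completion handling is elided.
def _phrasal_constraint_demo(token_ids=(4, 5, 6), generated=(4, 9)):
    fulfilled_idx = -1
    for token in generated:
        if fulfilled_idx + 1 < len(token_ids) and token == token_ids[fulfilled_idx + 1]:
            fulfilled_idx += 1  # stepped towards completing the phrase
        else:
            fulfilled_idx = -1  # reset: progress is lost on any mismatch
    return fulfilled_idx + 1    # number of phrase tokens currently matched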
class UpperCAmelCase_ :
def __init__( self : str , UpperCAmelCase__ : List[List[int]] , UpperCAmelCase__ : str=True ) -> str:
lowerCAmelCase = max([len(UpperCAmelCase__ ) for one in nested_token_ids] )
lowerCAmelCase = {}
for token_ids in nested_token_ids:
lowerCAmelCase = root
for tidx, token_id in enumerate(UpperCAmelCase__ ):
if token_id not in level:
lowerCAmelCase = {}
lowerCAmelCase = level[token_id]
if no_subsets and self.has_subsets(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F''' {nested_token_ids}.''' )
lowerCAmelCase = root
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase = self.trie
for current_token in current_seq:
lowerCAmelCase = start[current_token]
lowerCAmelCase = list(start.keys() )
return next_tokens
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : List[Any] ) -> Dict:
lowerCAmelCase = self.next_tokens(UpperCAmelCase__ )
return len(UpperCAmelCase__ ) == 0
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
lowerCAmelCase = list(root.values() )
if len(UpperCAmelCase__ ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCAmelCase__ ) for nn in next_nodes] )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] ) -> List[Any]:
lowerCAmelCase = self.count_leaves(UpperCAmelCase__ )
return len(UpperCAmelCase__ ) != leaf_count
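# Editorial note (hedged sketch): the trie above is nothing more than nested
# dicts keyed by token id, with an empty dict marking a leaf. The token ids
# below are assumptions used only to show the construction and lookup.
def _disjunctive_trie_demo():
    root = {}
    for token_ids in [[5, 6], [5, 7, 8]]:
        level = root
        for token_id in token_ids:
            level = level.setdefault(token_id, {})
    # After the prefix [5], the allowed next tokens are the keys at that level.
    return set(root[5].keys())  # -> {6, 7}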
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : Tuple , UpperCAmelCase__ : List[List[int]] ) -> List[Any]:
super(UpperCAmelCase__ , self ).__init__()
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or len(UpperCAmelCase__ ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
lowerCAmelCase = DisjunctiveTrie(UpperCAmelCase__ )
lowerCAmelCase = nested_token_ids
lowerCAmelCase = self.trie.max_height
lowerCAmelCase = []
lowerCAmelCase = False
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
lowerCAmelCase = self.trie.next_tokens(self.current_seq )
if len(UpperCAmelCase__ ) == 0:
return None
else:
return token_list
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : int ) -> Any:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowerCAmelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : int ) -> Tuple:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
if self.does_advance(UpperCAmelCase__ ):
self.current_seq.append(UpperCAmelCase__ )
lowerCAmelCase = True
else:
lowerCAmelCase = True
self.reset()
lowerCAmelCase = self.trie.reached_leaf(self.current_seq )
lowerCAmelCase = completed
return stepped, completed, reset
def __UpperCAmelCase ( self : Optional[int] ) -> int:
lowerCAmelCase = False
lowerCAmelCase = []
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[Any]=False ) -> List[Any]:
lowerCAmelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
lowerCAmelCase = self.seqlen
lowerCAmelCase = self.current_seq
lowerCAmelCase = self.completed
return new_constraint
class UpperCAmelCase_ :
def __init__( self : Tuple , UpperCAmelCase__ : List[Constraint] ) -> str:
lowerCAmelCase = constraints
# max # of steps required to fulfill a given constraint
lowerCAmelCase = max([c.seqlen for c in constraints] )
lowerCAmelCase = len(UpperCAmelCase__ )
lowerCAmelCase = False
self.init_state()
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
lowerCAmelCase = []
lowerCAmelCase = None
lowerCAmelCase = [constraint.copy(stateful=UpperCAmelCase__ ) for constraint in self.constraints]
def __UpperCAmelCase ( self : List[str] ) -> Any:
lowerCAmelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
lowerCAmelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowerCAmelCase = constraint.advance()
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.append(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.extend(UpperCAmelCase__ )
else:
lowerCAmelCase = self.inprogress_constraint.advance()
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.append(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.extend(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) == 0:
return None
else:
return token_list
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[List[int]] ) -> Dict:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowerCAmelCase , lowerCAmelCase = self.add(UpperCAmelCase__ )
# the entire list of constraints are fulfilled
if self.completed:
break
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : int ) -> Optional[Any]:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
lowerCAmelCase , lowerCAmelCase = False, False
if self.completed:
lowerCAmelCase = True
lowerCAmelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.inprogress_constraint.update(UpperCAmelCase__ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCAmelCase__ ) )
lowerCAmelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowerCAmelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
lowerCAmelCase = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCAmelCase__ ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = pending_constraint.update(UpperCAmelCase__ )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(UpperCAmelCase__ )
lowerCAmelCase = None
if not complete and stepped:
lowerCAmelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowerCAmelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowerCAmelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Union[str, Any]=True ) -> Optional[int]:
        lowerCAmelCase = ConstraintListState(self.constraints ) # we actually never touch self.constraints objects
        # throughout this process, so they remain in their initialization state.
if stateful:
lowerCAmelCase = [
constraint.copy(stateful=UpperCAmelCase__ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowerCAmelCase = self.inprogress_constraint.copy(stateful=UpperCAmelCase__ )
lowerCAmelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 55
| 0
|
'''simple docstring'''
from datetime import datetime
import requests
def _A ( A__ ):
"""simple docstring"""
__lowercase = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
__lowercase = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(A__ ).content
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter Video/IGTV url: ''').strip()
lowerCAmelCase__ = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(f'Done. Video saved to disk as {file_name}.')
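# Editorial note (hedged sketch): requests.get(...).content above buffers the
# whole video in memory. For large files a streamed variant is gentler; the
# chunk size and timeout below are assumptions, not part of the original.
def _stream_download(url: str, file_name: str) -> None:
    import requests

    with requests.get(url, stream=True, timeout=30) as response:
        response.raise_for_status()
        with open(file_name, "wb") as fp:
            for chunk in response.iter_content(chunk_size=1 << 16):
                fp.write(chunk)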
| 104
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE : Union[str, Any] = 'CLIPImageProcessor'
SCREAMING_SNAKE_CASE : Union[str, Any] = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[Any] ,lowercase__ : Dict=None ,lowercase__ : Union[str, Any]=None ,**lowercase__ : Tuple ):
__lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,lowercase__ ,)
__lowercase = kwargs.pop('''feature_extractor''' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase__ ,lowercase__ )
def __call__( self : List[Any] ,lowercase__ : str=None ,lowercase__ : List[Any]=None ,lowercase__ : Optional[Any]=None ,**lowercase__ : int ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__lowercase = self.tokenizer(lowercase__ ,return_tensors=lowercase__ ,**lowercase__ )
if images is not None:
__lowercase = self.image_processor(lowercase__ ,return_tensors=lowercase__ ,**lowercase__ )
if text is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) ,tensor_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,*lowercase__ : List[str] ,**lowercase__ : int ):
return self.tokenizer.batch_decode(*lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,*lowercase__ : Optional[int] ,**lowercase__ : Union[str, Any] ):
return self.tokenizer.decode(*lowercase__ ,**lowercase__ )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.tokenizer.model_input_names
__lowercase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,lowercase__ ,)
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' ,lowercase__ ,)
return self.image_processor
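# Editorial note (hedged sketch): a typical call pattern for a processor like
# the one above, which merges tokenizer and image-processor outputs into one
# BatchEncoding. The model id and dummy image are assumptions, and running this
# requires network access to download the checkpoint.
def _clip_processor_demo():
    from PIL import Image
    from transformers import CLIPProcessor

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.new("RGB", (224, 224))
    # Returns input_ids/attention_mask from the tokenizer plus pixel_values.
    return processor(text=["a photo of a cat"], images=image, return_tensors="pt")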
| 104
| 1
|
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _lowerCamelCase ( _UpperCamelCase=32 , _UpperCamelCase=10 , _UpperCamelCase=100 , _UpperCamelCase=1026 , _UpperCamelCase=True , _UpperCamelCase="data/tokenized_stories_train_wikitext103.jbl" , _UpperCamelCase="igf_context_pairs.jbl" , ):
'''simple docstring'''
set_seed(3 )
# generate train_data and objective_set
__lowerCAmelCase , __lowerCAmelCase = generate_datasets(
_UpperCamelCase , _UpperCamelCase , number=_UpperCamelCase , min_len=1026 , trim=_UpperCamelCase )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
__lowerCAmelCase = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
__lowerCAmelCase = load_gpta("gpt2" ).to(_UpperCamelCase )
print("computing perplexity on objective set" )
__lowerCAmelCase = compute_perplexity(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).item()
print("perplexity on objective set:" , _UpperCamelCase )
# collect igf pairs and save to file demo.jbl
collect_objective_set(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=15 , _UpperCamelCase=128 , _UpperCamelCase=100 , _UpperCamelCase="igf_model.pt" , ):
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
__lowerCAmelCase = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
__lowerCAmelCase = SecondaryLearner(_UpperCamelCase )
# Train secondary learner
__lowerCAmelCase = train_secondary_learner(
_UpperCamelCase , _UpperCamelCase , max_epochs=_UpperCamelCase , batch_size=_UpperCamelCase , eval_freq=100 , igf_model_path=_UpperCamelCase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=32 , _UpperCamelCase=1000 , _UpperCamelCase=16 , _UpperCamelCase=1.0 , _UpperCamelCase=recopy_gpta , _UpperCamelCase=None , _UpperCamelCase=10 , _UpperCamelCase="gpt2_finetuned.pt" , ):
'''simple docstring'''
__lowerCAmelCase = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
__lowerCAmelCase = RandomSampler(_UpperCamelCase )
__lowerCAmelCase = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase )
__lowerCAmelCase = max_steps // (len(_UpperCamelCase )) + 1
__lowerCAmelCase = 0
__lowerCAmelCase = torch.zeros((1, context_len) , dtype=torch.long , device=_UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = recopy_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
model.train()
if secondary_learner is not None:
secondary_learner.to(_UpperCamelCase )
secondary_learner.eval()
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = []
__lowerCAmelCase = []
# Compute the performance of the transformer model at the beginning
__lowerCAmelCase = compute_perplexity(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
test_perps.append(_UpperCamelCase )
print("Test perplexity, step" , _UpperCamelCase , ":" , _UpperCamelCase )
for epoch in range(int(_UpperCamelCase ) ):
for step, example in enumerate(_UpperCamelCase ):
torch.cuda.empty_cache()
__lowerCAmelCase = random.randint(0 , example.size(2 ) - context_len - 1 )
__lowerCAmelCase = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
__lowerCAmelCase = model(_UpperCamelCase , labels=_UpperCamelCase )
__lowerCAmelCase = True
if secondary_learner is not None:
__lowerCAmelCase = secondary_learner.forward(
torch.tensor(_UpperCamelCase , dtype=torch.long , device=_UpperCamelCase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(_UpperCamelCase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
__lowerCAmelCase = -1
if predicted_q < threshold:
__lowerCAmelCase = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
__lowerCAmelCase = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
__lowerCAmelCase = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
__lowerCAmelCase = compute_perplexity(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
test_perps.append(_UpperCamelCase )
print("Test perplexity, step" , _UpperCamelCase , ":" , _UpperCamelCase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , _UpperCamelCase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=_UpperCamelCase , default=_UpperCamelCase , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=_UpperCamelCase , default=_UpperCamelCase , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=_UpperCamelCase , type=_UpperCamelCase , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=_UpperCamelCase , default=_UpperCamelCase , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=32 , type=_UpperCamelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=100 , type=_UpperCamelCase , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=100 , type=_UpperCamelCase , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=1000 , type=_UpperCamelCase , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=128 , type=_UpperCamelCase , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=_UpperCamelCase , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=10 , type=_UpperCamelCase , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=100 , type=_UpperCamelCase , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=1026 , type=_UpperCamelCase , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=_UpperCamelCase , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=_UpperCamelCase , type=_UpperCamelCase , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=_UpperCamelCase , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=_UpperCamelCase , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=_UpperCamelCase , type=_UpperCamelCase , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=_UpperCamelCase , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
__lowerCAmelCase = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
__lowerCAmelCase = training_secondary_learner(
_UpperCamelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
__lowerCAmelCase = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
__lowerCAmelCase , __lowerCAmelCase = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=_UpperCamelCase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=_UpperCamelCase , secondary_learner=_UpperCamelCase , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
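# Editorial note (hedged sketch): the "perplexity" tracked above is the
# exponential of the mean token-level cross-entropy. Toy logits and labels
# below are assumptions for illustration only.
def _perplexity_demo():
    import torch

    logits = torch.randn(1, 5, 10)           # (batch, seq_len, vocab_size)
    labels = torch.randint(0, 10, (1, 5))
    loss = torch.nn.functional.cross_entropy(logits.view(-1, 10), labels.view(-1))
    return torch.exp(loss)                    # perplexity of the toy batch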
| 259
|
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
A : Any = "bert-base-cased"
A : Any = "google/pegasus-xsum"
A : Union[str, Any] = [" Sam ate lunch today.", "Sams lunch ingredients."]
A : Union[str, Any] = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
A : Optional[int] = "patrickvonplaten/t5-tiny-random"
A : int = "sshleifer/bart-tiny-random"
A : Optional[int] = "sshleifer/tiny-mbart"
A : Any = "sshleifer/tiny-marian-en-de"
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = "\n".join(_UpperCamelCase )
Path(_UpperCamelCase ).open("w" ).writelines(_UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(_UpperCamelCase , f"{split}.source" ) , _UpperCamelCase )
_dump_articles(os.path.join(_UpperCamelCase , f"{split}.target" ) , _UpperCamelCase )
return tmp_dir
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def snake_case ( self , __a ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(__a )
__lowerCAmelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__lowerCAmelCase = max(len(tokenizer.encode(__a ) ) for a in ARTICLES )
__lowerCAmelCase = max(len(tokenizer.encode(__a ) ) for a in SUMMARIES )
__lowerCAmelCase = 4
__lowerCAmelCase = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__lowerCAmelCase , __lowerCAmelCase = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
__lowerCAmelCase = SeqaSeqDataset(
__a , data_dir=__a , type_path="train" , max_source_length=__a , max_target_length=__a , src_lang=__a , tgt_lang=__a , )
__lowerCAmelCase = DataLoader(__a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__a , __a )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__lowerCAmelCase = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def snake_case ( self , __a ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(__a )
__lowerCAmelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__lowerCAmelCase = max(len(tokenizer.encode(__a ) ) for a in ARTICLES )
__lowerCAmelCase = max(len(tokenizer.encode(__a ) ) for a in SUMMARIES )
__lowerCAmelCase = 4
__lowerCAmelCase = LegacySeqaSeqDataset(
__a , data_dir=__a , type_path="train" , max_source_length=20 , max_target_length=__a , )
__lowerCAmelCase = DataLoader(__a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def snake_case ( self ):
__lowerCAmelCase = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
__lowerCAmelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__lowerCAmelCase = tmp_dir.joinpath("train.source" ).open().readlines()
__lowerCAmelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__a , __a , 1_28 , __a )
__lowerCAmelCase = {x.name for x in tmp_dir.iterdir()}
__lowerCAmelCase = {x.name for x in save_dir.iterdir()}
__lowerCAmelCase = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__a ) < len(__a )
assert len(__a ) == 1
assert len(packed_examples[0] ) == sum(len(__a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def snake_case ( self ):
if not FAIRSEQ_AVAILABLE:
return
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._get_dataset(max_len=64 )
__lowerCAmelCase = 64
__lowerCAmelCase = ds.make_dynamic_sampler(__a , required_batch_size_multiple=__a )
__lowerCAmelCase = [len(__a ) for x in batch_sampler]
assert len(set(__a ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__a ) == len(__a ) # no dropped or added examples
__lowerCAmelCase = DataLoader(__a , batch_sampler=__a , collate_fn=ds.collate_fn , num_workers=2 )
__lowerCAmelCase = []
__lowerCAmelCase = []
for batch in data_loader:
__lowerCAmelCase = batch["input_ids"].shape
__lowerCAmelCase = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            __lowerCAmelCase = np.prod(batch["input_ids"].shape )  # np.product is a deprecated alias of np.prod
num_src_per_batch.append(__a )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__a )
assert num_src_per_batch[0] == max(__a )
if failures:
raise AssertionError(f"too many tokens in {len(__a )} batches" )
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._get_dataset(max_len=5_12 )
__lowerCAmelCase = 2
__lowerCAmelCase = ds.make_sortish_sampler(__a , shuffle=__a )
__lowerCAmelCase = DataLoader(__a , batch_size=__a , collate_fn=ds.collate_fn , num_workers=2 )
__lowerCAmelCase = DataLoader(__a , batch_size=__a , collate_fn=ds.collate_fn , num_workers=2 , sampler=__a )
__lowerCAmelCase = tokenizer.pad_token_id
def count_pad_tokens(__a , __a="input_ids" ):
return [batch[k].eq(__a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__a , k="labels" ) ) < sum(count_pad_tokens(__a , k="labels" ) )
assert sum(count_pad_tokens(__a ) ) < sum(count_pad_tokens(__a ) )
assert len(__a ) == len(__a )
def snake_case ( self , __a=10_00 , __a=1_28 ):
if os.getenv("USE_REAL_DATA" , __a ):
__lowerCAmelCase = "examples/seq2seq/wmt_en_ro"
__lowerCAmelCase = max_len * 2 * 64
if not Path(__a ).joinpath("train.len" ).exists():
save_len_file(__a , __a )
else:
__lowerCAmelCase = "examples/seq2seq/test_data/wmt_en_ro"
__lowerCAmelCase = max_len * 4
save_len_file(__a , __a )
__lowerCAmelCase = AutoTokenizer.from_pretrained(__a )
__lowerCAmelCase = SeqaSeqDataset(
__a , data_dir=__a , type_path="train" , max_source_length=__a , max_target_length=__a , n_obs=__a , )
return ds, max_tokens, tokenizer
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._get_dataset()
__lowerCAmelCase = set(DistributedSortishSampler(__a , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=__a ) )
__lowerCAmelCase = set(DistributedSortishSampler(__a , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=__a ) )
assert idsa.intersection(__a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def snake_case ( self , __a ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(__a , use_fast=__a )
if tok_name == MBART_TINY:
__lowerCAmelCase = SeqaSeqDataset(
__a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
__lowerCAmelCase = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__lowerCAmelCase = SeqaSeqDataset(
__a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
__lowerCAmelCase = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__a ) == 1 if tok_name == BART_TINY else len(__a ) == 0
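# Editorial note (hedged sketch): the mBART checks above rely on
# shift_tokens_right rotating the labels so that the language code (the last
# non-pad token) lands at position 0 of decoder_input_ids. A simplified
# re-implementation of that contract; the ids below are assumptions.
def _shift_tokens_right_demo():
    import torch

    pad_token_id = 1
    labels = torch.tensor([[5, 6, 2, 9, pad_token_id]])  # 2 = eos, 9 = lang code
    shifted = labels.clone()
    last_real = (labels.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    start_tokens = shifted.gather(1, last_real)
    shifted[:, 1:] = labels[:, :-1]
    shifted[:, 0:1] = start_tokens
    return shifted  # -> tensor([[9, 5, 6, 2, 9]])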
| 259
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
lowercase_ = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
lowercase_ = 'UperNetConfig'
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str], _lowerCamelCase : int, _lowerCamelCase : int, _lowerCamelCase : Union[int, Tuple[int, int]], _lowerCamelCase : Union[int, Tuple[int, int], str] = 0, _lowerCamelCase : bool = False, _lowerCamelCase : Union[int, Tuple[int, int]] = 1, ):
'''simple docstring'''
super().__init__()
__A = nn.Convad(
in_channels=_lowerCamelCase, out_channels=_lowerCamelCase, kernel_size=_lowerCamelCase, padding=_lowerCamelCase, bias=_lowerCamelCase, dilation=_lowerCamelCase, )
__A = nn.BatchNormad(_lowerCamelCase )
__A = nn.ReLU()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : torch.Tensor ):
'''simple docstring'''
__A = self.conv(_lowerCamelCase )
__A = self.batch_norm(_lowerCamelCase )
__A = self.activation(_lowerCamelCase )
return output
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str], _lowerCamelCase : int, _lowerCamelCase : int, _lowerCamelCase : int ):
'''simple docstring'''
super().__init__()
__A = [
nn.AdaptiveAvgPoolad(_lowerCamelCase ),
UperNetConvModule(_lowerCamelCase, _lowerCamelCase, kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(_lowerCamelCase ), _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : torch.Tensor ):
'''simple docstring'''
__A = input
for layer in self.layers:
__A = layer(_lowerCamelCase )
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str], _lowerCamelCase : Tuple[int, ...], _lowerCamelCase : int, _lowerCamelCase : int, _lowerCamelCase : bool ):
'''simple docstring'''
super().__init__()
__A = pool_scales
__A = align_corners
__A = in_channels
__A = channels
__A = []
for i, pool_scale in enumerate(_lowerCamelCase ):
__A = UperNetPyramidPoolingBlock(pool_scale=_lowerCamelCase, in_channels=_lowerCamelCase, channels=_lowerCamelCase )
self.blocks.append(_lowerCamelCase )
self.add_module(str(_lowerCamelCase ), _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : torch.Tensor ):
'''simple docstring'''
__A = []
for ppm in self.blocks:
__A = ppm(_lowerCamelCase )
__A = nn.functional.interpolate(
_lowerCamelCase, size=x.size()[2:], mode='''bilinear''', align_corners=self.align_corners )
ppm_outs.append(_lowerCamelCase )
return ppm_outs
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Any, _lowerCamelCase : List[str], _lowerCamelCase : Tuple ):
'''simple docstring'''
super().__init__()
__A = config
__A = config.pool_scales # e.g. (1, 2, 3, 6)
__A = in_channels
__A = config.hidden_size
__A = False
__A = nn.Convad(self.channels, config.num_labels, kernel_size=1 )
# PSP Module
__A = UperNetPyramidPoolingModule(
self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners, )
__A = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels, self.channels, kernel_size=3, padding=1, )
# FPN Module
__A = nn.ModuleList()
__A = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
__A = UperNetConvModule(_lowerCamelCase, self.channels, kernel_size=1 )
__A = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1 )
self.lateral_convs.append(_lowerCamelCase )
self.fpn_convs.append(_lowerCamelCase )
__A = UperNetConvModule(
len(self.in_channels ) * self.channels, self.channels, kernel_size=3, padding=1, )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
self.apply(self._init_weights )
def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : str ):
'''simple docstring'''
if isinstance(_lowerCamelCase, nn.Convad ):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__A = inputs[-1]
__A = [x]
psp_outs.extend(self.psp_modules(_lowerCamelCase ) )
__A = torch.cat(_lowerCamelCase, dim=1 )
__A = self.bottleneck(_lowerCamelCase )
return output
def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : torch.Tensor ):
'''simple docstring'''
# build laterals
__A = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_lowerCamelCase ) )
# build top-down path
__A = len(_lowerCamelCase )
for i in range(used_backbone_levels - 1, 0, -1 ):
__A = laterals[i - 1].shape[2:]
__A = laterals[i - 1] + nn.functional.interpolate(
laterals[i], size=_lowerCamelCase, mode='''bilinear''', align_corners=self.align_corners )
# build outputs
__A = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1, 0, -1 ):
__A = nn.functional.interpolate(
fpn_outs[i], size=fpn_outs[0].shape[2:], mode='''bilinear''', align_corners=self.align_corners )
__A = torch.cat(_lowerCamelCase, dim=1 )
__A = self.fpn_bottleneck(_lowerCamelCase )
__A = self.classifier(_lowerCamelCase )
return output
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any], _lowerCamelCase : Tuple, _lowerCamelCase : int = 2, _lowerCamelCase : int = 3, _lowerCamelCase : Union[int, Tuple[int, int]] = 1 ):
'''simple docstring'''
super().__init__()
__A = config
__A = config.auxiliary_in_channels
__A = config.auxiliary_channels
__A = config.auxiliary_num_convs
__A = config.auxiliary_concat_input
__A = in_index
__A = (kernel_size // 2) * dilation
__A = []
convs.append(
UperNetConvModule(
self.in_channels, self.channels, kernel_size=_lowerCamelCase, padding=_lowerCamelCase, dilation=_lowerCamelCase ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels, self.channels, kernel_size=_lowerCamelCase, padding=_lowerCamelCase, dilation=_lowerCamelCase ) )
if self.num_convs == 0:
__A = nn.Identity()
else:
__A = nn.Sequential(*_lowerCamelCase )
if self.concat_input:
__A = UperNetConvModule(
self.in_channels + self.channels, self.channels, kernel_size=_lowerCamelCase, padding=kernel_size // 2 )
__A = nn.Convad(self.channels, config.num_labels, kernel_size=1 )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
self.apply(self._init_weights )
def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : List[Any] ):
'''simple docstring'''
if isinstance(_lowerCamelCase, nn.Convad ):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : torch.Tensor ):
'''simple docstring'''
# just take the relevant feature maps
__A = encoder_hidden_states[self.in_index]
__A = self.convs(_lowerCamelCase )
if self.concat_input:
__A = self.conv_cat(torch.cat([hidden_states, output], dim=1 ) )
__A = self.classifier(_lowerCamelCase )
return output
class snake_case ( _lowerCAmelCase ):
'''simple docstring'''
A_ : List[str] = UperNetConfig
A_ : Optional[Any] = "pixel_values"
A_ : str = True
def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : int ):
'''simple docstring'''
if isinstance(_lowerCamelCase, _lowerCamelCase ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : Optional[int], _lowerCamelCase : Tuple=False ):
'''simple docstring'''
if isinstance(_lowerCamelCase, _lowerCamelCase ):
__A = value
lowercase_ = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowercase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , _lowerCAmelCase , )
class snake_case ( _lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any], _lowerCamelCase : str ):
'''simple docstring'''
super().__init__(_lowerCamelCase )
__A = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
__A = UperNetHead(_lowerCamelCase, in_channels=self.backbone.channels )
__A = UperNetFCNHead(_lowerCamelCase ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=_lowerCamelCase, config_class=_CONFIG_FOR_DOC )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        """Run the backbone, the decode head and (optionally) the auxiliary head, returning logits upsampled to the input resolution."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions )
        features = outputs.feature_maps
        logits = self.decode_head(features )
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode='''bilinear''', align_corners=False )
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features )
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode='''bilinear''', align_corners=False )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('''The number of labels should be greater than one''' )
            else:
                # compute weighted loss: main head plus a down-weighted auxiliary head
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                main_loss = loss_fct(logits, labels )
                auxiliary_loss = loss_fct(auxiliary_logits, labels )
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
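# A hedged usage sketch (not part of the original module). The checkpoint name and the
# image path below are placeholders; any UperNet checkpoint on the Hub works the same way.
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor
    image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" )
    inputs = image_processor(images=Image.open("scene.jpg" ), return_tensors="pt" )
    with torch.no_grad():
        outputs = model(**inputs )
    # logits: (batch_size, num_labels, height, width), upsampled to the input resolution
    print(outputs.logits.shape )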
| 266
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__( self, a=2, b=3, length=64, seed=None ):
        """Toy regression dataset: y = a * x + b plus Gaussian noise."""
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,) ).astype(np.float32 )
def __len__( self : str ):
'''simple docstring'''
return self.length
    def __getitem__( self, i ):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU( torch.nn.Module ):
    """Regression model with fixed two-element parameters; only the first element is used."""
    def __init__( self, a=0, b=0, double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True
    def forward( self, x=None ):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel( torch.nn.Module ):
    """Scalar regression model: y = a * x + b."""
    def __init__( self, a=0, b=0, double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True
    def forward( self, x=None ):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders( accelerator, batch_size=16 ):
    """Build tiny train/eval dataloaders from the MRPC test samples."""
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    data_files = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
    datasets = load_dataset('''csv''' , data_files=data_files )
    label_list = datasets['''train'''].unique('''label''' )
    label_to_id = {v: i for i, v in enumerate(label_list )}
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None , padding='''max_length''' )
        if "label" in examples:
            outputs['''labels'''] = [label_to_id[l] for l in examples['''label''']]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders; the tiny batch sizes keep the test fast.
    train_dataloader = DataLoader(tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader
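# A hedged wiring sketch (not in the original helpers): preparing the mocked dataloaders
# with `accelerate`. It only illustrates the API surface the tests above rely on.
if __name__ == "__main__":
    from accelerate import Accelerator
    accelerator = Accelerator()
    train_dataloader, eval_dataloader = mocked_dataloaders(accelerator, batch_size=16 )
    train_dataloader, eval_dataloader = accelerator.prepare(train_dataloader, eval_dataloader )
    batch = next(iter(train_dataloader ) )
    print({k: v.shape for k, v in batch.items()} )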
| 266
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
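# Illustrative note (not part of the original file): with the `_LazyModule` indirection
# above, importing this package stays cheap — the torch-backed submodule listed in
# `_import_structure` is only loaded the first time one of its exported names is resolved.
# A sketch (import path assumed from the relative imports in this file):
#     from transformers.models.deprecated import trajectory_transformer as tt
#     cfg = tt.TrajectoryTransformerConfig()   # light: only the config module is imported
#     model_cls = tt.TrajectoryTransformerModel  # first access triggers the torch import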
| 367
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig( PretrainedConfig ):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ) -> None:
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads(self ) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size(self ) -> int:
        return self.d_model
    @classmethod
    def from_backbone_config(cls , backbone_config: PretrainedConfig , **kwargs ):
        """Instantiate a DETR config from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config , **kwargs )
    def to_dict(self ) -> Dict[str, any]:
        """Serialize this instance to a dictionary, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )
    @property
    def atol_for_validation(self ) -> float:
        return 1e-5
    @property
    def default_onnx_opset(self ) -> int:
        return 12
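# A hedged export sketch (not part of the config file): driving the `DetrOnnxConfig` above
# through the `transformers.onnx` export helper. The output path is a placeholder.
if __name__ == "__main__":
    from pathlib import Path
    from transformers import AutoImageProcessor, AutoModel
    from transformers.onnx import export
    model = AutoModel.from_pretrained("facebook/detr-resnet-50" )
    preprocessor = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50" )
    onnx_config = DetrOnnxConfig(model.config )
    export(preprocessor , model , onnx_config , onnx_config.default_onnx_opset , Path("detr.onnx" ) )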
| 342
| 0
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = """hf-internal-testing/tiny-random-bert"""
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
FULL_COMMIT_HASH = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class GetFromCacheTests( unittest.TestCase ):
    def test_cached_file( self ):
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR , subfolder ) ) )
        with open(os.path.join(CACHE_DIR , '''refs''' , '''main''' ) ) as f:
            main_commit = f.read()
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , '''snapshots''' , main_commit , CONFIG_NAME ) )
        self.assertTrue(os.path.isfile(archive_file ) )
        # File is cached at the same place the second time.
        new_cached_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        self.assertEqual(archive_file , new_cached_file )
        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME , revision='''9b8c223''' )
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , '''snapshots''' , FULL_COMMIT_HASH , CONFIG_NAME ) )
    def test_cached_file_errors( self ):
        with self.assertRaisesRegex(EnvironmentError , '''is not a valid model identifier''' ):
            _ = cached_file('''tiny-random-bert''' , CONFIG_NAME )
        with self.assertRaisesRegex(EnvironmentError , '''is not a valid git identifier''' ):
            _ = cached_file(RANDOM_BERT , CONFIG_NAME , revision='''aaaa''' )
        with self.assertRaisesRegex(EnvironmentError , '''does not appear to have a file named''' ):
            _ = cached_file(RANDOM_BERT , '''conf''' )
    def test_non_existence_is_cached( self ):
        with self.assertRaisesRegex(EnvironmentError , '''does not appear to have a file named''' ):
            _ = cached_file(RANDOM_BERT , '''conf''' )
        with open(os.path.join(CACHE_DIR , '''refs''' , '''main''' ) ) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR , '''.no_exist''' , main_commit , '''conf''' ) ) )
        path = cached_file(RANDOM_BERT , '''conf''' , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )
        path = cached_file(RANDOM_BERT , '''conf''' , local_files_only=True , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
            path = cached_file(RANDOM_BERT , '''conf''' , _raise_exceptions_for_connection_errors=False )
            self.assertIsNone(path )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_has_file( self ):
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , WEIGHTS_NAME ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , TF2_WEIGHTS_NAME ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , FLAX_WEIGHTS_NAME ) )
    def test_get_file_from_repo_distant( self ):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError , '''is not a valid model identifier''' ):
            get_file_from_repo('''bert-base-case''' , CONFIG_NAME )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError , '''is not a valid git identifier''' ):
            get_file_from_repo('''bert-base-cased''' , CONFIG_NAME , revision='''ahaha''' )
        resolved_file = get_file_from_repo('''bert-base-cased''' , CONFIG_NAME )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file , '''r''' ).read() )
        self.assertEqual(config['''hidden_size'''] , 768 )
    def test_get_file_from_repo_local( self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir ) / '''a.txt'''
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir , '''a.txt''' ) , str(filename ) )
            self.assertIsNone(get_file_from_repo(tmp_dir , '''b.txt''' ) )
| 179
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
    @classmethod
    def create(cls , common: CommonSchedulerState , init_noise_sigma: jnp.ndarray , timesteps: jnp.ndarray ):
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput( FlaxSchedulerOutput ):
    state: DDPMSchedulerState
class FlaxDDPMScheduler( FlaxSchedulerMixin , ConfigMixin ):
    """Denoising diffusion probabilistic models (DDPM) scheduler, Flax implementation."""
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
    @property
    def has_state(self ):
        return True
    @register_to_config
    def __init__(
        self ,
        num_train_timesteps: int = 1000 ,
        beta_start: float = 0.0001 ,
        beta_end: float = 0.02 ,
        beta_schedule: str = "linear" ,
        trained_betas: Optional[jnp.ndarray] = None ,
        variance_type: str = "fixed_small" ,
        clip_sample: bool = True ,
        prediction_type: str = "epsilon" ,
        dtype: jnp.dtype = jnp.float32 ,
    ):
        self.dtype = dtype
    def create_state(self , common: Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
    def scale_model_input(self , state: DDPMSchedulerState , sample: jnp.ndarray , timestep: Optional[int] = None ) -> jnp.ndarray:
        """DDPM does not rescale model inputs, so the sample is returned unchanged."""
        return sample
    def set_timesteps(self , state: DDPMSchedulerState , num_inference_steps: int , shape: Tuple = () ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
    def _get_variance(self , state: DDPMSchedulerState , t , predicted_variance=None , variance_type=None ):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(
        self ,
        state: DDPMSchedulerState ,
        model_output: jnp.ndarray ,
        timestep: int ,
        sample: jnp.ndarray ,
        key=None ,
        return_dict: bool = True ,
    ):
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output , predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                ''' or `v_prediction` for the FlaxDDPMScheduler.''' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key , num=1 )
            noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
        variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
    def add_noise(self , state: DDPMSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray , ) -> jnp.ndarray:
        return add_noise_common(state.common , original_samples , noise , timesteps )
    def get_velocity(self , state: DDPMSchedulerState , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray , ) -> jnp.ndarray:
        return get_velocity_common(state.common , sample , noise , timesteps )
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
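# A hedged usage sketch (not in the original file): a bare denoising loop with this
# scheduler. The model call is a stand-in — any Flax noise-prediction model would slot in.
if __name__ == "__main__":
    scheduler = FlaxDDPMScheduler(num_train_timesteps=1000 )
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state , num_inference_steps=50 )
    key = jax.random.PRNGKey(0 )
    sample = jax.random.normal(key , (1, 3, 32, 32) , dtype=scheduler.dtype )
    for t in state.timesteps:
        model_output = sample  # placeholder for `model.apply(params, sample, t)`
        key , step_key = jax.random.split(key )
        sample , state = scheduler.step(state , model_output , t , sample , key=step_key , return_dict=False )
    print(sample.shape )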
| 179
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}
SPIECE_UNDERLINE = '▁'
class CamembertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = CamembertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , **kwargs , ):
        # The mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """Build model inputs as <s> A </s> (or <s> A </s></s> B </s> for pairs)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        """CamemBERT does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
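# A brief usage sketch (illustrative, not in the original file).
if __name__ == "__main__":
    tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base" )
    encoded = tokenizer("J'aime le camembert !" )
    print(encoded.input_ids )
    print(tokenizer.decode(encoded.input_ids ) )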
| 367
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest( unittest.TestCase ):
    def setUp( self ):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor( self , **kwargs ):
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
    def test_feature_extractor( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([12000] )
        audio_dict = feature_extractor(audio , return_tensors="np" )
        input_processor = processor(audio=audio , return_tensors="np" )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        images = np.ones([3, 224, 224] )
        image_dict = image_processor(images , return_tensors="np" )
        input_processor = processor(images=images , return_tensors="np" )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_processor( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([12000] )
        images = np.ones([3, 224, 224] )
        inputs = processor(audio=audio , images=images )
        self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 189
| 0
|
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = """cuda""" if torch.cuda.is_available() else """cpu"""
def A_ ( A__ , A__=100 , A__=" " ) -> List[str]:
a__ : List[Any] = text.split(A__ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(A__ ) , A__ )]
def split_documents( documents: dict ) -> dict:
    """Split documents into passages of 100 words."""
    titles, texts = [], []
    for title, text in zip(documents['title'] , documents['text'] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else '' )
                texts.append(passage )
    return {"title": titles, "text": texts}
def embed( documents: dict , ctx_encoder: DPRContextEncoder , ctx_tokenizer: DPRContextEncoderTokenizerFast ) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents['title'] , documents['text'] , truncation=True , padding='longest' , return_tensors='pt' )['input_ids']
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main( rag_example_args , processing_args , index_hnsw_args , ) -> None:
    ######################################
    logger.info('Step 1 - Create the dataset' )
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
    dataset.save_to_disk(passages_path )
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset
    ######################################
    logger.info('Step 2 - Index the dataset' )
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index('embeddings' , custom_index=index )
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
    dataset.get_index('embeddings' ).save(index_path )
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
    question: Optional[str] = field(
        default=None , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
    rag_model_name: str = field(
        default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
    dpr_ctx_encoder_model_name: str = field(
        default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
            '''help''': (
                '''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
                ''' \'facebook/dpr-ctx_encoder-multiset-base\''''
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None , metadata={
            '''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
        } , )
    batch_size: int = field(
        default=16 , metadata={
            '''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
        } , )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
    m: int = field(
        default=128 , metadata={
            '''help''': (
                '''The number of bi-directional links created for every new element during the HNSW index construction.'''
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 99
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
    class SentencesToListOfCharacters(tr.AbstractTransform ):
        """Turn sentences into a flat list of characters, inserting the delimiter between sentences."""
        def __init__( self , sentence_delimiter: str = " " ):
            self.sentence_delimiter = sentence_delimiter
        def process_string( self , s ):
            return list(s )
        def process_list( self , inp ):
            chars = []
            for sent_idx, sentence in enumerate(inp ):
                chars.extend(self.process_string(sentence ) )
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp ) - 1:
                    chars.append(self.sentence_delimiter )
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER( datasets.Metric ):
    """Character error rate metric, backed by jiwer."""
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
    def _compute(self , predictions , references , concatenate_texts=False ):
        if concatenate_texts:
            return jiwer.compute_measures(
                references , predictions , truth_transform=cer_transform , hypothesis_transform=cer_transform , )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions , references ):
            measures = jiwer.compute_measures(
                reference , prediction , truth_transform=cer_transform , hypothesis_transform=cer_transform , )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
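# A worked instance of the formula above: reference "abc" vs. prediction "axcd" aligns
# with one substitution (b→x), one insertion (d) and no deletions, so
# CER = (S + D + I) / N = (1 + 0 + 1) / 3 ≈ 0.67. A runnable sketch:
if __name__ == "__main__":
    cer_metric = CER()
    score = cer_metric.compute(predictions=["this is the prediction"] , references=["this is the reference"] )
    print(score )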
| 99
| 1
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester( unittest.TestCase ):
    def setUp( self ):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
    def tearDown( self ):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ):
        code = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , "new_code.py" )
        with open(fname , "w" , newline="\n" ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , "r" ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_diffusers( self ):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
        self.assertEqual(code , REFERENCE_CODE )
    def test_copy_consistency( self ):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            F"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}" , F"{long_class_name}SchedulerOutput" , re.sub("Bert" , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , REFERENCE_CODE , overwrite_result=re.sub("DDPM" , "Test" , REFERENCE_CODE ) , )
| 42
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput( BaseOutput ):
    sample: torch.FloatTensor
class TransformerTemporalModel( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__(
        self ,
        num_attention_heads: int = 16 ,
        attention_head_dim: int = 88 ,
        in_channels: Optional[int] = None ,
        out_channels: Optional[int] = None ,
        num_layers: int = 1 ,
        dropout: float = 0.0 ,
        norm_num_groups: int = 32 ,
        cross_attention_dim: Optional[int] = None ,
        attention_bias: bool = False ,
        sample_size: Optional[int] = None ,
        activation_fn: str = "geglu" ,
        norm_elementwise_affine: bool = True ,
        double_self_attention: bool = True ,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups , num_channels=in_channels , eps=1e-6 , affine=True )
        self.proj_in = nn.Linear(in_channels , inner_dim )
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , cross_attention_dim=cross_attention_dim , activation_fn=activation_fn , attention_bias=attention_bias , double_self_attention=double_self_attention , norm_elementwise_affine=norm_elementwise_affine , )
                for d in range(num_layers )
            ] )
        self.proj_out = nn.Linear(inner_dim , in_channels )
    def forward(
        self ,
        hidden_states ,
        encoder_hidden_states=None ,
        timestep=None ,
        class_labels=None ,
        num_frames=1 ,
        cross_attention_kwargs=None ,
        return_dict: bool = True ,
    ):
        # 1. Input: fold the frame axis out of the batch axis
        batch_frames , channel , height , width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size , num_frames , channel , height , width )
        hidden_states = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        hidden_states = self.norm(hidden_states )
        hidden_states = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , num_frames , channel )
        hidden_states = self.proj_in(hidden_states )
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states , encoder_hidden_states=encoder_hidden_states , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , class_labels=class_labels , )
        # 3. Output: restore the (batch * frames, channels, height, width) layout
        hidden_states = self.proj_out(hidden_states )
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size , height , width , num_frames , channel )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames , channel , height , width )
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output )
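# A shape sketch (illustrative, not in the original file): frames are folded out of the
# batch dimension on input and restored on output, so input and output shapes match.
if __name__ == "__main__":
    model = TransformerTemporalModel(num_attention_heads=8 , attention_head_dim=8 , in_channels=64 , norm_num_groups=8 )
    hidden_states = torch.randn(2 * 4 , 64 , 16 , 16 )  # (batch * frames, channels, height, width)
    out = model(hidden_states , num_frames=4 ).sample
    print(out.shape )  # torch.Size([8, 64, 16, 16])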
| 42
| 1
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main() -> None:
    """Close or nag stale issues on the huggingface/accelerate repository."""
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/accelerate" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 22
|
'''simple docstring'''
def harmonic_series( n_term: str ) -> list:
    """Return the first ``n_term`` terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term ) ):
        series.append(f'1/{temp + 1}' if series else "1" )
    return series
if __name__ == "__main__":
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
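# Worked example: harmonic_series("5") -> ['1', '1/2', '1/3', '1/4', '1/5'];
# summing the terms gives H_5 = 1 + 1/2 + 1/3 + 1/4 + 1/5 ≈ 2.283.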
| 22
| 1
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        vocab = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES["vocab"] )
        save_json(mock_tokenizer_config , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES["source_spm"] )
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES["target_spm"] )
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id( self ):
        token = '''</s>'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "</s>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "<pad>" )
        self.assertEqual(len(vocab_keys ) , 9 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )
    def test_tokenizer_equivalence_en_de( self ):
        en_de_tokenizer = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' )
        batch = en_de_tokenizer(["I am a small frog"] , return_tensors=None )
        self.assertIsInstance(batch , BatchEncoding )
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected , batch.input_ids[0] )
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir )
        contents = [x.name for x in Path(save_dir ).glob("*" )]
        self.assertIn("source.spm" , contents )
        MarianTokenizer.from_pretrained(save_dir )
    def test_outputs_not_longer_than_maxlen( self ):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"] , padding=True , truncation=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual(batch.input_ids.shape , (2, 512) )
    def test_outputs_can_be_shorter( self ):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"] , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch_smaller , BatchEncoding )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def __snake_case ( self : Tuple ) -> Dict:
# fmt: off
__snake_case : List[str] = {'''input_ids''': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case, model_name="Helsinki-NLP/opus-mt-en-de", revision="1a8c2263da11e68e50938f97e10cd57820bd504c", decode_kwargs={"use_source_tokenizer": True}, )
    def test_separate_vocabs(self) -> None:
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
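
    # Editor's sketch (assumption: run outside this test class, with network
    # access; the checkpoint name is the one used in the integration test
    # above). It restates what the truncation test pins down:
    #   tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    #   batch = tok(["I am a small frog" * 1000], padding=True, truncation=True, return_tensors="pt")
    #   assert batch.input_ids.shape[1] <= tok.model_max_length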
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_dir):
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    readme_path = os.path.join(dataset_dir, "README.md")
    assert os.path.exists(readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 2_3_5_1_5_6_3,
"num_examples": 1_0_0_0_0,
},
{
"name": "validation",
"num_bytes": 2_3_8_4_1_8,
"num_examples": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
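

# Editor's sketch: a standalone sanity check of the 1%-closeness helper above;
# the numbers are illustrative.
assert is_apercent_close(995, 1000)  # 0.5% off -> accepted
assert not is_apercent_close(950, 1000)  # 5% off -> rejected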
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=None,
        scaling="mean",
        num_dynamic_real_features=0,
        num_static_real_features=0,
        num_static_categorical_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        is_encoder_decoder=True,
        activation_function="gelu",
        dropout=0.05,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        attention_type="prob",
        sampling_factor=5,
        distil=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
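

# Editor's sketch: a quick look at the derived sizes on a small config. The
# argument values are illustrative, not taken from any released checkpoint.
if __name__ == "__main__":
    cfg = InformerConfig(prediction_length=24, num_time_features=2)
    print(cfg.context_length)  # falls back to prediction_length -> 24
    print(cfg.feature_size)  # input_size * len(lags_sequence) + _number_of_features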
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """simple docstring"""

    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    """simple docstring"""

    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError('list index out of range.')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError('list index out of range.')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    """simple docstring"""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
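
    # Editor's sketch: a tiny interactive-style demo of the list above.
    demo = CircularLinkedList()
    for value in (1, 2, 3):
        demo.insert_tail(value)
    print(demo)  # 1->2->3
    print(demo.delete_front())  # 1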
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "unispeech"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
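

# Editor's sketch: with the default conv strides, raw audio is downsampled by
# 5*2*2*2*2*2*2 = 320 input samples per output logit frame.
if __name__ == "__main__":
    print(UniSpeechConfig().inputs_to_logits_ratio)  # 320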
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
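

# Editor's sketch: each list above is a strictly decreasing diffusion timestep
# schedule; a sampler would pick one by name and iterate over it.
if __name__ == "__main__":
    schedules = {"fast27": fast27_timesteps, "smart50": smart50_timesteps}
    print(schedules["fast27"][:3])  # [999, 800, 799]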
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list:
    """Sieve of Eratosthenes: return all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count semiprimes (products of two primes) below max_number."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
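

# Editor's sketch: spot-check on a small bound. Semiprimes below 30 are
# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26 -- ten of them.
assert solution(30) == 10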
if __name__ == "__main__":
print(f'{solution() = }')
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"
    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}.")

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
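

# Editor's sketch: the default ESM-2 alphabet above has 33 symbols.
if __name__ == "__main__":
    assert len(get_default_vocab_list()) == 33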
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)

    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
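
    # Editor's sketch: duplicates and already-sorted input are handled as well.
    assert strand_sort([1, 1, 2, 2]) == [1, 1, 2, 2]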
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCamelCase__ : Dict = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase__ : Optional[int] = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ : int = False
UpperCamelCase__ : Tuple = False
UpperCamelCase__ : Optional[Any] = False
UpperCamelCase__ : Optional[Any] = False
    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
    def test_config(self):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''')
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''')
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear))
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE)
__a = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = True
for model_class in self.all_model_classes:
__a = True
__a = False
__a = True
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
__a = outputs.attentions
__a = len(self.model_tester.depths)
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a = True
__a = config.window_size**2
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
__a = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__a = len(__SCREAMING_SNAKE_CASE)
# Check attention is always last and order is fine
__a = True
__a = True
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
if hasattr(self.model_tester , '''num_hidden_states_types'''):
__a = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__a = 2
self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE))
__a = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
__a = outputs.hidden_states
__a = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths) + 1)
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
# Swinv2 has a different seq_length
__a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
__a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
__a = outputs.reshaped_hidden_states
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
__a , __a , __a , __a = reshaped_hidden_states[0].shape
__a = (
reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__a = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
__a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
__a = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__a = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__a = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = _config_zero_init(__SCREAMING_SNAKE_CASE)
for model_class in self.all_model_classes:
__a = model_class(config=__SCREAMING_SNAKE_CASE)
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
@cached_property
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''')
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''').to(
__SCREAMING_SNAKE_CASE)
__a = self.default_image_processor
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
__a = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE)
# forward pass
with torch.no_grad():
__a = model(**__SCREAMING_SNAKE_CASE)
# verify the logits
__a = torch.Size((1, 1_000))
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE)
__a = torch.tensor([-0.39_47, -0.43_06, 0.00_26]).to(__SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4))
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json''',
'''google/bigbird-roberta-large''': '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json''',
'''google/bigbird-base-trivia-itc''': '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json''',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE = {0: "batch", 1: "choice", 2: "sequence"}
else:
__SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
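

# Editor's sketch: the ONNX axis mapping for the default (non-multiple-choice)
# task; names here follow the classes defined above.
if __name__ == "__main__":
    onnx_config = BigBirdOnnxConfig(BigBirdConfig())
    print(onnx_config.inputs)  # input_ids / attention_mask -> {0: "batch", 1: "sequence"}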
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    '''simple docstring'''
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    """simple docstring"""

    mp_parameters: str = field(
        default="", metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"}, )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.", FutureWarning, )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch")
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the Project Euler 43 substring-divisibility property."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-to-(n-1) pandigital numbers with the substring property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
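

# Editor's sketch: 1406357289 is the classic Project Euler 43 example; it
# passes every substring-divisibility test above.
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))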
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 1_6384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""")
return None
return str(self._mask_token)
@mask_token.setter
    def mask_token(self, value):
        """simple docstring"""
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """simple docstring"""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 134
|
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def __init__( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int]=13 , lowerCAmelCase_ : Optional[int]=7 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : int=99 , lowerCAmelCase_ : List[Any]=32 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : Dict=64 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : str=5_12 , lowerCAmelCase_ : Optional[Any]=16 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[Any]=2 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Union[str, Any]=1 , ) -> List[Any]:
'''simple docstring'''
A__ : Dict =parent
A__ : Optional[int] =batch_size
A__ : List[Any] =seq_length
A__ : Any =is_training
A__ : List[str] =use_input_mask
A__ : str =use_token_type_ids
A__ : Tuple =use_labels
A__ : Tuple =vocab_size
A__ : Optional[Any] =hidden_size
A__ : Dict =num_hidden_layers
A__ : str =num_attention_heads
A__ : int =intermediate_size
A__ : Union[str, Any] =hidden_act
A__ : List[Any] =hidden_dropout_prob
A__ : Union[str, Any] =attention_probs_dropout_prob
A__ : Dict =max_position_embeddings
A__ : Any =type_vocab_size
A__ : Any =type_sequence_label_size
A__ : int =initializer_range
A__ : str =num_labels
A__ : Optional[int] =num_choices
A__ : Optional[int] =scope
A__ : List[str] =q_groups
A__ : Dict =k_groups
A__ : Any =v_groups
A__ : Optional[Any] =post_attention_groups
A__ : Optional[int] =intermediate_groups
A__ : Optional[int] =output_groups
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
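        # Build one random batch: token ids, an optional attention mask, and
        # (when labels are used) sequence-, token- and choice-level label tensors.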
A__ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Optional[int] =None
if self.use_input_mask:
A__ : str =random_attention_mask([self.batch_size, self.seq_length] )
A__ : Union[str, Any] =None
A__ : Tuple =None
A__ : Dict =None
if self.use_labels:
A__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : int =ids_tensor([self.batch_size] , self.num_choices )
A__ : str =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] =SqueezeBertModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Dict =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =SqueezeBertForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
A__ : str =SqueezeBertForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Union[str, Any] =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
A__ : Dict =self.num_labels
A__ : int =SqueezeBertForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict ) -> Optional[int]:
'''simple docstring'''
A__ : str =self.num_labels
A__ : int =SqueezeBertForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.num_choices
A__ : Dict =SqueezeBertForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Any =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Optional[Any] =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
        A__ : Any =self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = A__
        inputs_dict ={"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__snake_case = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = True
__snake_case = False
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
A__ : Optional[Any] =SqueezeBertModelTester(self )
A__ : int =ConfigTester(self , config_class=lowerCAmelCase_ , dim=37 )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
A__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
A__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCAmelCase_ )
def lowercase__ ( self : Dict ) -> Any:
'''simple docstring'''
A__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCAmelCase_ )
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
A__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCAmelCase_ )
@slow
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : int =SqueezeBertModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
A__ : List[str] =SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" )
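        # squeezebert/squeezebert-mnli is a three-way NLI classifier, hence the
        # (1, 3) logits shape asserted below.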
A__ : List[str] =torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
A__ : Tuple =model(lowerCAmelCase_ )[0]
A__ : Union[str, Any] =torch.Size((1, 3) )
self.assertEqual(output.shape , lowerCAmelCase_ )
A__ : Tuple =torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
| 134
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCamelCase ( self : Dict ) -> List[Any]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
_UpperCamelCase = [[1, 2, 4], [1, 2, 3, 4]]
_UpperCamelCase = DisjunctiveConstraint(__UpperCamelCase )
self.assertTrue(isinstance(dc.token_ids , __UpperCamelCase ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _UpperCamelCase ( self : List[str] ) -> Tuple:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2], which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
_UpperCamelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(__UpperCamelCase ) # fails here
def _UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase = [[1, 2, 3], [1, 2, 4]]
_UpperCamelCase = DisjunctiveConstraint(__UpperCamelCase )
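        # update() advances the constraint by one token and returns the triple
        # (stepped, completed, reset): stepped - the token extended a branch,
        # completed - an entire disjunctive branch has been matched,
        # reset - the token broke the current match and progress started over.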
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = dc.update(1 )
_UpperCamelCase = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = dc.update(2 )
_UpperCamelCase = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = dc.update(3 )
_UpperCamelCase = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _UpperCamelCase ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_UpperCamelCase = DisjunctiveConstraint(__UpperCamelCase )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 54
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCAmelCase_ ( _lowercase):
snake_case__ = ['''image_processor''', '''tokenizer''']
snake_case__ = '''OwlViTImageProcessor'''
snake_case__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Any , __UpperCamelCase : int=None , __UpperCamelCase : Optional[Any]=None , **__UpperCamelCase : List[str] ) -> Union[str, Any]:
_UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __UpperCamelCase , )
_UpperCamelCase = kwargs.pop('''feature_extractor''' )
_UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__UpperCamelCase , __UpperCamelCase )
def __call__( self : List[str] , __UpperCamelCase : Dict=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Optional[Any]="max_length" , __UpperCamelCase : List[Any]="np" , **__UpperCamelCase : Optional[int] ) -> Optional[int]:
if text is None and query_images is None and images is None:
raise ValueError(
'''You have to specify at least one text or query image or image. All three cannot be none.''' )
if text is not None:
if isinstance(__UpperCamelCase , __UpperCamelCase ) or (isinstance(__UpperCamelCase , __UpperCamelCase ) and not isinstance(text[0] , __UpperCamelCase )):
_UpperCamelCase = [self.tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )]
elif isinstance(__UpperCamelCase , __UpperCamelCase ) and isinstance(text[0] , __UpperCamelCase ):
_UpperCamelCase = []
# Maximum number of queries across batch
_UpperCamelCase = max([len(__UpperCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__UpperCamelCase ) != max_num_queries:
_UpperCamelCase = t + [''' '''] * (max_num_queries - len(__UpperCamelCase ))
_UpperCamelCase = self.tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
encodings.append(__UpperCamelCase )
else:
raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
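            # Merge the per-sample encodings into single batched tensors for the
            # requested framework (numpy, JAX, PyTorch or TensorFlow).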
if return_tensors == "np":
_UpperCamelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
_UpperCamelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_UpperCamelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
_UpperCamelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_UpperCamelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
_UpperCamelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_UpperCamelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
_UpperCamelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
else:
raise ValueError('''Target return tensor type could not be returned''' )
_UpperCamelCase = BatchEncoding()
_UpperCamelCase = input_ids
_UpperCamelCase = attention_mask
if query_images is not None:
_UpperCamelCase = BatchEncoding()
_UpperCamelCase = self.image_processor(
__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase ).pixel_values
_UpperCamelCase = query_pixel_values
if images is not None:
_UpperCamelCase = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
if text is not None and images is not None:
_UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCamelCase ) , tensor_type=__UpperCamelCase )
def _UpperCamelCase ( self : str , *__UpperCamelCase : str , **__UpperCamelCase : str ) -> List[Any]:
return self.image_processor.post_process(*__UpperCamelCase , **__UpperCamelCase )
def _UpperCamelCase ( self : str , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Optional[Any] ) -> Optional[int]:
return self.image_processor.post_process_object_detection(*__UpperCamelCase , **__UpperCamelCase )
def _UpperCamelCase ( self : List[Any] , *__UpperCamelCase : List[str] , **__UpperCamelCase : Optional[Any] ) -> int:
return self.image_processor.post_process_image_guided_detection(*__UpperCamelCase , **__UpperCamelCase )
def _UpperCamelCase ( self : Tuple , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Any ) -> str:
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def _UpperCamelCase ( self : Optional[int] , *__UpperCamelCase : Tuple , **__UpperCamelCase : List[Any] ) -> List[str]:
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __UpperCamelCase , )
return self.image_processor_class
@property
def _UpperCamelCase ( self : List[str] ) -> Optional[Any]:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __UpperCamelCase , )
return self.image_processor
| 54
| 1
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __snake_case( _lowerCAmelCase ) -> Tuple:
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
snake_case__ : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
snake_case__ : Union[str, Any] = np.array(_lowerCAmelCase ).astype(np.floataa ) / 255.0
snake_case__ : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 )
snake_case__ : Tuple = torch.from_numpy(_lowerCAmelCase )
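    # Map pixel values from [0, 1] to the [-1, 1] range the diffusion model works in.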
return 2.0 * image - 1.0
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
def __init__( self : List[str] , snake_case_ : VQModel , snake_case_ : UNetaDModel , snake_case_ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
super().__init__()
self.register_modules(vqvae=snake_case_ , unet=snake_case_ , scheduler=snake_case_ )
@torch.no_grad()
def __call__( self : Optional[Any] , snake_case_ : Union[torch.Tensor, PIL.Image.Image] = None , snake_case_ : Optional[int] = 1 , snake_case_ : Optional[int] = 100 , snake_case_ : Optional[float] = 0.0 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ):
if isinstance(snake_case_ , PIL.Image.Image ):
snake_case__ : str = 1
elif isinstance(snake_case_ , torch.Tensor ):
snake_case__ : List[Any] = image.shape[0]
else:
raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(snake_case_ )}" )
if isinstance(snake_case_ , PIL.Image.Image ):
snake_case__ : Union[str, Any] = preprocess(snake_case_ )
        height , width = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
snake_case__ : str = (batch_size, self.unet.config.in_channels // 2, height, width)
snake_case__ : Dict = next(self.unet.parameters() ).dtype
snake_case__ : int = randn_tensor(snake_case_ , generator=snake_case_ , device=self.device , dtype=snake_case_ )
snake_case__ : Optional[Any] = image.to(device=self.device , dtype=snake_case_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(snake_case_ , device=self.device )
snake_case__ : Optional[Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
snake_case__ : Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case__ : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case__ : Optional[int] = {}
if accepts_eta:
snake_case__ : Optional[Any] = eta
for t in self.progress_bar(snake_case_ ):
# concat latents and low resolution image in the channel dimension.
snake_case__ : int = torch.cat([latents, image] , dim=1 )
snake_case__ : Optional[int] = self.scheduler.scale_model_input(snake_case_ , snake_case_ )
# predict the noise residual
snake_case__ : str = self.unet(snake_case_ , snake_case_ ).sample
# compute the previous noisy sample x_t -> x_t-1
snake_case__ : Tuple = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
# decode the image latents with the VQVAE
snake_case__ : Optional[int] = self.vqvae.decode(snake_case_ ).sample
snake_case__ : Optional[int] = torch.clamp(snake_case_ , -1.0 , 1.0 )
snake_case__ : Dict = image / 2 + 0.5
snake_case__ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case__ : Tuple = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case_ )
| 35
|
from __future__ import annotations
from PIL import Image
# Define glider example
A : Any = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
A : Optional[Any] = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def __lowerCamelCase ( __a :list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
A__ = []
for i in range(len(__a ) ):
A__ = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
A__ = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(__a ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(__a ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(__a ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
A__ = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(__a )
return next_generation
def __lowerCamelCase ( __a :list[list[int]] , __a :int ) -> list[Image.Image]:
"""simple docstring"""
A__ = []
for _ in range(__a ):
# Create output image
A__ = Image.new("""RGB""" , (len(cells[0] ), len(__a )) )
A__ = img.load()
# Save cells to image
for x in range(len(__a ) ):
for y in range(len(cells[0] ) ):
A__ = 2_5_5 - cells[y][x] * 2_5_5
A__ = (colour, colour, colour)
# Save image
images.append(__a )
A__ = new_generation(__a )
return images
if __name__ == "__main__":
A : str = generate_images(GLIDER, 1_6)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 274
| 0
|
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__a :Dict = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
__a :List[Any] = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
__a :int = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def __A ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : int=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[str]="auto" , UpperCAmelCase : Any=-1 , UpperCAmelCase : Union[str, Any]=0.9 , UpperCAmelCase : List[Any]=5 , UpperCAmelCase : Optional[int]=500 , UpperCAmelCase : List[str]="gpt2-large" , UpperCAmelCase : Union[str, Any]=-1 , UpperCAmelCase : List[Any]=1024 , UpperCAmelCase : str=25 , UpperCAmelCase : Union[str, Any]=5 , UpperCAmelCase : str=True , UpperCAmelCase : Any=25 , ):
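        # Thin wrapper: every option is forwarded to the official `mauve-text`
        # implementation, whose result object is returned unchanged.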
A_ = compute_mauve(
p_text=lowerCAmelCase__ , q_text=lowerCAmelCase__ , p_features=lowerCAmelCase__ , q_features=lowerCAmelCase__ , p_tokens=lowerCAmelCase__ , q_tokens=lowerCAmelCase__ , num_buckets=lowerCAmelCase__ , pca_max_data=lowerCAmelCase__ , kmeans_explained_var=lowerCAmelCase__ , kmeans_num_redo=lowerCAmelCase__ , kmeans_max_iter=lowerCAmelCase__ , featurize_model_name=lowerCAmelCase__ , device_id=lowerCAmelCase__ , max_text_length=lowerCAmelCase__ , divergence_curve_discretization_size=lowerCAmelCase__ , mauve_scaling_factor=lowerCAmelCase__ , verbose=lowerCAmelCase__ , seed=lowerCAmelCase__ , )
return out
| 366
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _a :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ):
A_ = parent
A_ = 13
A_ = 7
A_ = True
A_ = True
A_ = True
A_ = True
A_ = 99
A_ = 384
A_ = 2
A_ = 4
A_ = 37
A_ = "gelu"
A_ = 0.1
A_ = 0.1
A_ = 512
A_ = 16
A_ = 2
A_ = 0.02
A_ = 3
A_ = 4
A_ = 128
A_ = 2
A_ = 9
A_ = 1
A_ = None
def __A ( self : Optional[int] ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
A_ = TFConvBertModel(config=UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A_ = [input_ids, input_mask]
A_ = model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ):
A_ = TFConvBertForMaskedLM(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ):
A_ = self.num_labels
A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
A_ = self.num_choices
A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ):
A_ = self.num_labels
A_ = TFConvBertForTokenClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : List[str] ):
        A_ = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = A_
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Any = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = False
def __A ( self : List[str] ):
A_ = TFConvBertModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : Tuple ):
self.config_tester.run_common_tests()
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def __A ( self : str ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = True
if hasattr(UpperCAmelCase , "use_cache" ):
A_ = True
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
for model_class in self.all_model_classes:
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model_class(UpperCAmelCase )
A_ = len(model(UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" )
A_ = tf.keras.models.load_model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = outputs["encoder_hidden_states"]
A_ = outputs["encoder_attentions"]
else:
A_ = outputs["hidden_states"]
A_ = outputs["attentions"]
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
A_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __A ( self : List[str] ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(UpperCAmelCase )
def __A ( self : Any ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
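        # ConvBERT splits its heads between self-attention and span-based dynamic
        # convolution (default head_ratio=2), so attention maps only expose
        # num_attention_heads / 2 heads - hence the divisor in the shape checks below.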
def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ):
A_ = len(UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
A_ = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ):
A_ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A_ = True
A_ = False
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
A_ = len(UpperCAmelCase )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_decoder_attentions_output(UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
# Check attention is always last and order is fine
A_ = True
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Dict ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(UpperCAmelCase )[0]
A_ = [1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
A_ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
| 329
| 0
|
"""simple docstring"""
def lowercase (SCREAMING_SNAKE_CASE_ : int = 10 , SCREAMING_SNAKE_CASE_ : int = 10_00 , SCREAMING_SNAKE_CASE_ : bool = True ) -> int:
assert (
isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_val must be <= max_val)' )
return min_val if option else max_val
def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> int:
return int((number_a + number_a) / 2 )
def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> None:
assert (
isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
), 'argument values must be type of "int"'
if lower > higher:
        raise ValueError('argument value for lower and higher must be (lower < higher)' )
if not lower < to_guess < higher:
raise ValueError(
'guess value must be within the range of lower and higher value' )
def answer(SCREAMING_SNAKE_CASE_ : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('started...' )
SCREAMING_SNAKE_CASE = lower
SCREAMING_SNAKE_CASE = higher
SCREAMING_SNAKE_CASE = []
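    # Classic binary search: guess the midpoint of [lower, higher] and shrink
    # the interval according to whether the guess was too high or too low.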
while True:
SCREAMING_SNAKE_CASE = get_avg(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
last_numbers.append(SCREAMING_SNAKE_CASE_ )
if answer(SCREAMING_SNAKE_CASE_ ) == "low":
SCREAMING_SNAKE_CASE = number
elif answer(SCREAMING_SNAKE_CASE_ ) == "high":
SCREAMING_SNAKE_CASE = number
else:
break
print(F'guess the number : {last_numbers[-1]}' )
print(F'details : {last_numbers!s}' )
def lowercase () -> None:
SCREAMING_SNAKE_CASE = int(input('Enter lower value : ' ).strip() )
SCREAMING_SNAKE_CASE = int(input('Enter high value : ' ).strip() )
SCREAMING_SNAKE_CASE = int(input('Enter value to guess : ' ).strip() )
guess_the_number(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 113
|
"""simple docstring"""
from math import isclose, sqrt
def lowercase (SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ) -> tuple[float, float, float]:
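    # The ellipse is 4x^2 + y^2 = 100; implicit differentiation gives a tangent
    # slope of -4x/y, so the normal at (x, y) has slope y/(4x). The sa/ca values
    # below are sin and cos of twice the normal's angle (tangent double-angle
    # identities), used to reflect the incoming gradient about the normal.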
SCREAMING_SNAKE_CASE = point_y / 4 / point_x
SCREAMING_SNAKE_CASE = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
SCREAMING_SNAKE_CASE = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
SCREAMING_SNAKE_CASE = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
SCREAMING_SNAKE_CASE = outgoing_gradient**2 + 4
SCREAMING_SNAKE_CASE = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
SCREAMING_SNAKE_CASE = (point_y - outgoing_gradient * point_x) ** 2 - 1_00
SCREAMING_SNAKE_CASE = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
SCREAMING_SNAKE_CASE = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
SCREAMING_SNAKE_CASE = x_minus if isclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else x_plus
SCREAMING_SNAKE_CASE = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def lowercase (SCREAMING_SNAKE_CASE_ : float = 1.4 , SCREAMING_SNAKE_CASE_ : float = -9.6 ) -> int:
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = first_x_coord
SCREAMING_SNAKE_CASE = first_y_coord
SCREAMING_SNAKE_CASE = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = next_point(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
| 113
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__UpperCamelCase = random.Random()
def _a ( _lowerCamelCase , _lowerCamelCase=1.0 , _lowerCamelCase=None , _lowerCamelCase=None ) -> Optional[int]:
"""simple docstring"""
if rng is None:
__snake_case : Dict = global_rng
__snake_case : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _A ( unittest.TestCase ):
def __init__( self : Any , __magic_name__ : Optional[int] , __magic_name__ : List[str]=7 , __magic_name__ : List[str]=4_00 , __magic_name__ : List[Any]=20_00 , __magic_name__ : int=24 , __magic_name__ : List[str]=24 , __magic_name__ : Union[str, Any]=0.0 , __magic_name__ : List[str]=1_60_00 , __magic_name__ : Dict=True , __magic_name__ : Tuple=True , ) -> int:
"""simple docstring"""
__snake_case : List[str] = parent
__snake_case : Dict = batch_size
__snake_case : List[str] = min_seq_length
__snake_case : str = max_seq_length
__snake_case : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__snake_case : Optional[Any] = feature_size
__snake_case : Tuple = num_mel_bins
__snake_case : Union[str, Any] = padding_value
__snake_case : str = sampling_rate
__snake_case : Tuple = return_attention_mask
__snake_case : List[Any] = do_normalize
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase__ ( self : int , __magic_name__ : str=False , __magic_name__ : Tuple=False ) -> int:
"""simple docstring"""
def _flatten(__magic_name__ : List[Any] ):
return list(itertools.chain(*__UpperCAmelCase ) )
if equal_length:
__snake_case : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__snake_case : Optional[int] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__snake_case : str = [np.asarray(__UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None
    def setUp(self):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True)
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True)
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True)
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        # compare against the 24 expected values listed above
        self.assertTrue(np.allclose(input_features[0, 0, :24], EXPECTED_INPUT_FEATURES, atol=1e-4))
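# The normalization invariant checked throughout this suite is utterance-level
# CMVN: along the time axis, each (unpadded) feature matrix should have ~zero
# mean and ~unit variance. A minimal standalone sketch of that transform
# (illustrative numpy only, not the extractor's actual implementation):
def _cmvn_sketch(features):
    """Normalize a (time, feature_size) matrix to zero mean / unit variance."""
    import numpy as np
    mean = features.mean(axis=0)
    std = (features.var(axis=0) + 1e-10) ** 0.5
    return (features - mean) / std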
| 365
|
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 13
| 0
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = '''tokenizer_file'''
    special_tokens_map = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''')
        tokenizer.save_pretrained(self.tmpdirname)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)['''input_ids''']
        self.assertListEqual(TARGET_TOKENS, computed_tokens)
        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(INPUT_SENTENCES, decoded_tokens)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail('''Bloom Tokenizer should be able to deal with padding''')
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='''max_length''')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='''max_length''')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='''max_length''')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='''max_length''')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='''max_length''')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='''max_length''')
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset('''xnli''', '''all_languages''', split='''test''', streaming=True)
        sample_data = next(iter(ds))['''premise''']  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
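# Minimal usage sketch outside the test harness (assumes the
# `bigscience/tokenizer` checkpoint downloads successfully; byte-level BPE
# round-trips this ASCII sentence losslessly):
if __name__ == "__main__":
    tok = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''')
    ids = tok('''The quick brown fox''')['''input_ids''']
    assert tok.decode(ids) == '''The quick brown fox'''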
| 2
|
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
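# Tiny worked example (hand-computed from the table-rebuild rules above, so
# treat it as illustrative): feeding the bits "100" through decompress_data
# first emits "1", doubles the table on the power-of-two boundary, then
# decodes "00" to "0", yielding "10".
def _decompress_demo():
    assert decompress_data("100") == "10"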
| 292
| 0
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]
    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney').T
    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, 'hann'), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0)
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(self, raw_speech, return_tensors=None, return_attention_mask=True, sampling_rate=None, resample=False, mask_audio=False, **kwargs):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    f''' with {self.sampling_rate} and not {sampling_rate}.''')
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features])  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
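# Minimal usage sketch. The class and parameter names above are reconstructed
# from context, and the module uses package-relative imports, so the snippet
# below only runs from inside the package:
#
#   extractor = TvltFeatureExtractor()
#   audio = [np.random.randn(44_100).astype(np.float32)]  # ~1 s of noise
#   batch = extractor(audio, sampling_rate=44_100, return_attention_mask=True)
#   print(batch["audio_values"].shape, batch["audio_mask"].shape)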
| 361
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    if is_torch_version('<', '2.0.0') or not hasattr(torch, '_dynamo'):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, 'forward')
        original_forward = model.__dict__.pop('_original_forward', None)
        if original_forward is not None:
            while hasattr(forward, '__wrapped__'):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, '_converted_to_transformer_engine', False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()
def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    if not hasattr(obj, '__qualname__') and not hasattr(obj, '__name__'):
        obj = getattr(obj, '__class__', obj)
    if hasattr(obj, '__qualname__'):
        return obj.__qualname__
    if hasattr(obj, '__name__'):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
def is_port_in_use(port=None):
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(('localhost', port)) == 0
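# Usage sketch for merge_dicts (pure Python, so safe to reason about here):
# nested keys from `source` are merged recursively into `destination`, which
# is mutated and returned.
#
#   merged = merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}, "d": 3})
#   assert merged == {"a": {"b": 1, "c": 2}, "d": 3}
#
# patch_environment behaves like a scoped patch over os.environ: keys are
# upper-cased on entry and removed again on exit.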
| 162
| 0
|
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'AI-Sweden/gpt-sw3-126m': 2_048,
'AI-Sweden/gpt-sw3-350m': 2_048,
'AI-Sweden/gpt-sw3-1.6b': 2_048,
'AI-Sweden/gpt-sw3-6.7b': 2_048,
'AI-Sweden/gpt-sw3-20b': 2_048,
}
class GPTSwaTokenizer(PreTrainedTokenizer):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored")
            name_or_path = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"""[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]""")
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def preprocess_text(self, text: str) -> str:
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text
    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index: int) -> str:
        return self.sp_model.IdToPiece(index)
    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def encode_fast(self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)
        return token_ids
    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(all_responses) + f"""{self.bos_token}Bot:"""
        )
        return self.encode(text=prompt)
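# Sketch of what preprocess_text guarantees before SentencePiece sees the
# input: control characters are stripped, exotic whitespace collapses to a
# plain space, and the result is NFC-normalized, e.g.
#
#   unicodedata.normalize("NFC", "e\u0301") == "\u00e9"  # 'e' + combining acute -> 'é'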
| 269
|
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False
    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()
    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"""{i}-peak"""] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def log_measures(measures, description):
    print(f"""{description}:""")
    print(f"""- Time: {measures['time']:.2f}s""")
    for i in range(torch.cuda.device_count()):
        print(f"""- GPU {i} allocated: {measures[str(i)]:.2f}MiB""")
        peak = measures[f"""{i}-peak"""]
        print(f"""- GPU {i} peak: {peak:.2f}MiB""")
    print(f"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""")
    print(f"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""")
| 269
| 1
|
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
    print(F'''The first {n} digits of pi is: {pi(n)}''')
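# Sanity sketch: with 10 significant digits the Chudnovsky series reproduces
# the familiar leading digits of pi.
if __name__ == "__main__":
    assert pi(10) == "3.14159265"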
| 112
|
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
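# Worked example: the proper divisors of 6 are 1, 2 and 3, which sum back to
# 6 itself (a perfect number), so sum_of_divisors(6) == 6.
if __name__ == "__main__":
    assert sum_of_divisors(6) == 6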
| 112
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_msn'''] = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
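# Illustration of the deferral trick _LazyModule implements: exported names
# map to submodules, and the real import happens only on first attribute
# access (a sketch built on PEP 562; transformers' implementation is more
# elaborate).
def _lazy_getattr_sketch(name):
    import importlib
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module("." + module_name, __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")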
| 319
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location='''cpu''')
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d['''visual_bert.embeddings.position_ids'''] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['''cls.predictions.decoder.bias'''] = new_d['''cls.predictions.bias''']
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split('''/''')[-1] in ACCEPTABLE_CHECKPOINTS
    ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
    # Get Config
    if "pre" in checkpoint_path:
        model_type = '''pretraining'''
        if "vcr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2048}
        elif "vqa" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 1024}
        else:
            raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""")
    else:
        if "vcr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 512}
            model_type = '''multichoice'''
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2048}
            model_type = '''vqa_advanced'''
        elif "vqa" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2048, '''num_labels''': 3129}
            model_type = '''vqa'''
        elif "nlvr" in checkpoint_path:
            config_params = {
                '''visual_embedding_dim''': 1024,
                '''num_labels''': 2,
            }
            model_type = '''nlvr'''
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
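# Example invocation (paths are hypothetical):
#   python convert_visual_bert_checkpoint.py vqa_pre_trained.th ./visualbert-vqa-pretrained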
| 319
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class Swinv2Config(PretrainedConfig):
    """simple docstring"""
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
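# Minimal usage sketch (relies on the defaults reconstructed above; the module
# itself uses package-relative imports, so import the class via transformers):
#
#   from transformers import Swinv2Config
#   config = Swinv2Config(image_size=256, window_size=8)
#   assert config.hidden_size == 768  # int(96 * 2 ** (4 - 1)) for the default depths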
| 74
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 74
| 1
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, 'rb') as f:
        im = Image.open(f)
        return im.convert('RGB')
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        })
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."})
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        })
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        })
    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                'You must specify either a dataset name from the hub or a train and/or validation directory.')
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"})
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."})
    image_processor_name: Optional[str] = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        })
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."})
def collate_fn(examples):
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    labels = torch.tensor([example['labels'] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_image_classification', model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f""" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}""")
    logger.info(f"""Training/evaluation parameters {training_args}""")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                'Use --overwrite_output_dir to overcome.')
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task='image-classification', use_auth_token=True if model_args.use_auth_token else None)
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files['train'] = os.path.join(data_args.train_dir, '**')
        if data_args.validation_dir is not None:
            data_files['validation'] = os.path.join(data_args.validation_dir, '**')
        dataset = load_dataset(
            'imagefolder', data_files=data_files, cache_dir=model_args.cache_dir, task='image-classification')
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset['train'].train_test_split(data_args.train_val_split)
        dataset['train'] = split['train']
        dataset['validation'] = split['test']
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset['train'].features['labels'].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load('accuracy')
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task='image-classification', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes)
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size['shortest_edge']
    else:
        size = (image_processor.size['height'], image_processor.size['width'])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ])
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ])
    def train_transforms(example_batch):
        example_batch['pixel_values'] = [
            _train_transforms(pil_img.convert('RGB')) for pil_img in example_batch['image']
        ]
        return example_batch
    def val_transforms(example_batch):
        example_batch['pixel_values'] = [_val_transforms(pil_img.convert('RGB')) for pil_img in example_batch['image']]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError('--do_train requires a train dataset')
        if data_args.max_train_samples is not None:
            dataset['train'] = (
                dataset['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError('--do_eval requires a validation dataset')
        if data_args.max_eval_samples is not None:
            dataset['validation'] = (
                dataset['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=dataset['train'] if training_args.do_train else None, eval_dataset=dataset['validation'] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn)
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'image-classification',
        'dataset': data_args.dataset_name,
        'tags': ['image-classification', 'vision'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
    main()
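# Example launch (arguments are illustrative, not exhaustive):
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./vit-beans \
#       --do_train --do_eval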
| 78
|
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
"""simple docstring"""
    def get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ], mid_block_type='UNetMidBlock2DSimpleCrossAttn', up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='text', addition_embed_type_num_heads=2, cross_attention_norm='group_norm', resnet_time_scale_shift='scale_shift', act_fn='gelu', )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule='squaredcos_cap_v2', beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type='epsilon', variance_type='learned_range', )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ], mid_block_type='UNetMidBlock2DSimpleCrossAttn', up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='text', addition_embed_type_num_heads=2, cross_attention_norm='group_norm', resnet_time_scale_shift='scale_shift', act_fn='gelu', class_embed_type='timestep', mid_block_scale_factor=1.414, time_embedding_act_fn='gelu', time_embedding_dim=32, )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule='squaredcos_cap_v2', beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type='epsilon', variance_type='learned_range', )
        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule='squaredcos_cap_v2', beta_start=0.0001, beta_end=0.02, )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs['prompt']
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']
        if 'image' in inputs:
            image = inputs['image']
        else:
            image = None
        if 'mask_image' in inputs:
            mask_image = inputs['mask_image']
        else:
            mask_image = None
        if 'original_image' in inputs:
            original_image = inputs['original_image']
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)
        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f'`{optional_component}` did not stay set to None after loading.',
            )
        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']
        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
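# A hypothetical concrete test case (class and pipeline names are assumptions,
# not taken from this file) showing how the mixin's helpers above would be
# wired into unittest:
#
#   class IFPipelineFastTests(IFPipelineTesterMixin, unittest.TestCase):
#       pipeline_class = IFPipeline
#
#       def test_save_load_local(self):
#           self._test_save_load_local()
#
#       def test_save_load_optional_components(self):
#           self._test_save_load_optional_components()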
| 78
| 1
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list) -> None:
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        articles = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / 'scores.json')
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = f"\n            run_eval_search.py\n            {model}\n            {input_file_name}\n            {output_file_name}\n            --score_path {score_path}\n            --task {task}\n            --num_beams 2\n            --length_penalty 2.0\n        ".split()
        with patch.object(sys, 'argv', testargs):
            run_generate()
        assert Path(output_file_name).exists()
        # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        text = {
            'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / 'scores.json')
        reference_path = str(tmp_dir / 'val.target')
        _dump_articles(input_file_name, text['en'])
        _dump_articles(reference_path, text['de'])
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = f"\n            run_eval_search.py\n            {model}\n            {str(input_file_name)}\n            {str(output_file_name)}\n            --score_path {score_path}\n            --reference_path {reference_path}\n            --task {task}\n        ".split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'])
        with patch.object(sys, 'argv', testargs):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [' num_beams | length_penalty', model, 'Best score args']
        un_expected_strings = ['Info']
        if 'translation' in task:
            expected_strings.append('bleu')
        else:
            expected_strings.extend(ROUGE_KEYS)
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name).exists()
        os.remove(Path(output_file_name))
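# For context, a standalone sketch (not the actual run_eval_search
# implementation) of how a search string like the one passed above can be
# expanded into a parameter grid:
from itertools import product

def parse_search_string(search: str) -> list:
    # "num_beams=1:2 length_penalty=0.9:1.0" -> list of {name: value} dicts
    keys, value_lists = [], []
    for group in search.split():
        name, values = group.split('=')
        keys.append(name)
        value_lists.append(values.split(':'))
    return [dict(zip(keys, combo)) for combo in product(*value_lists)]

# parse_search_string('num_beams=1:2 length_penalty=0.9:1.0') yields the four
# combinations, starting with {'num_beams': '1', 'length_penalty': '0.9'}.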
| 247
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load('facebookresearch/dino:main', model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
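# Example invocation of the conversion script above (the script file name is an
# assumption, not taken from this file):
#   python convert_dino_to_vit.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino-vitb16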
| 247
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
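# With the lazy structure above, a plain import of this package stays cheap; a
# usage sketch (assuming the package is importable as part of transformers):
#
#   from transformers import MegaConfig   # no torch import triggered yet
#   from transformers import MegaModel    # torch-backed module resolved lazily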
| 120
|
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
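# Quick usage sketch of the two helpers above:
#   normalization([2, 4, 6])    -> [0.0, 0.5, 1.0]   (min-max scaled into [0, 1])
#   standardization([2, 4, 6])  -> [-1.0, 0.0, 1.0]  (zero mean, unit sample stdev)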
| 120
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = 'UperNetConfig'
class UperNetConvModule(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int]], padding: Union[int, Tuple[int, int], str] = 0, bias: bool = False, dilation: Union[int, Tuple[int, int]] = 1, ):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding, bias=bias, dilation=dilation, )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode='bilinear', align_corners=self.align_corners)
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners, )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1, )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1, )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))
        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode='bilinear', align_corners=self.align_corners)
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode='bilinear', align_corners=self.align_corners)
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1):
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2)
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UPERNET_INPUTS_DOCSTRING = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""", UPERNET_START_DOCSTRING, )
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.backbone = AutoBackbone.from_config(config.backbone_config)
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions)
        features = outputs.feature_maps
        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode='bilinear', align_corners=False)
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode='bilinear', align_corners=False)
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('The number of labels should be greater than one')
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
| 358
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
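# Behavior sketch for padding_tensor with a scalar fill value and right padding:
#   padding_tensor([[1, 2], [3]], -1, 'right', 4)
#   -> [[1, 2, -1, -1], [3, -1, -1, -1]]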
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith('P'):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt' if labels is None else None, )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['entity_ids']).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == 'right':
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature['ner_tags'] for feature in features]
        batch['ner_tags'] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature['original_entity_spans'] for feature in features]
        batch['original_entity_spans'] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
| 258
| 0
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead", FutureWarning, )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, image, mask_image, num_inference_steps=250, eta=0.0, jump_length=10, jump_n_samples=10, generator=None, output_type="pil", return_dict=True, ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_samples, self.device)
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
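# A short usage sketch for the pipeline above (the checkpoint id is
# illustrative; init_image and mask are PIL images prepared by the caller):
#   from diffusers import RePaintPipeline, RePaintScheduler
#   scheduler = RePaintScheduler.from_pretrained('google/ddpm-ema-celebahq-256')
#   pipe = RePaintPipeline.from_pretrained('google/ddpm-ema-celebahq-256', scheduler=scheduler)
#   result = pipe(image=init_image, mask_image=mask, num_inference_steps=250).images[0]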
| 95
|
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer, skip_prompt=False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)
        self.on_finalized_text(printable_text)

    def end(self):
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text, stream_end=False):
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        # Checks whether `cp` is the codepoint of a CJK character.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True
        return False
class TextIteratorStreamer(TextStreamer):
    def __init__(self, tokenizer, skip_prompt=False, timeout=None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text, stream_end=False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
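# A usage sketch for the iterator variant above, driving generation from a
# background thread (the model name is illustrative):
#
#   from threading import Thread
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   tok = AutoTokenizer.from_pretrained('gpt2')
#   model = AutoModelForCausalLM.from_pretrained('gpt2')
#   inputs = tok(['An example prompt'], return_tensors='pt')
#   streamer = TextIteratorStreamer(tok, skip_prompt=True)
#   Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20)).start()
#   for new_text in streamer:
#       print(new_text, end='')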
| 95
| 1
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2', revision='bf16', dtype=jnp.bfloat16, )
        prompt = 'A painting of a squirrel eating a burger'
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = 'stabilityai/stable-diffusion-2'
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder='scheduler')
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision='bf16', dtype=jnp.bfloat16, )
        params['scheduler'] = scheduler_params
        prompt = 'A painting of a squirrel eating a burger'
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 363
|
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch')
    state_dict = model.state_dict()
    config = {
        'down_block_types': down_block_types,
        'block_out_channels': block_out_channels,
        'up_block_types': up_block_types,
        'layers_per_block': 1,
        'use_timestep_embedding': True,
        'out_block_type': 'OutConv1DBlock',
        'norm_num_groups': 8,
        'downsample_each_block': False,
        'in_channels': 14,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'sample_size': 65536,
        'mid_block_type': 'MidResTemporalBlock1D',
        'act_fn': 'mish',
    }
    hf_value_function = UNet1DModel(**config)
    print(f'length of state dict: {len(state_dict.keys())}')
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys())}')
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin')
    with open(f'hub/hopper-medium-v2/unet/hor{hor}/config.json', 'w') as f:
        json.dump(config, f)
def value_function():
    config = {
        'in_channels': 14,
        'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
        'up_block_types': (),
        'out_block_type': 'ValueFunction',
        'mid_block_type': 'ValueFunctionMidBlock1D',
        'block_out_channels': (32, 64, 128, 256),
        'layers_per_block': 1,
        'downsample_each_block': True,
        'sample_size': 65536,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'use_timestep_embedding': True,
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'norm_num_groups': 8,
        'act_fn': 'mish',
    }
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch')
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f'length of state dict: {len(state_dict.keys())}')
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys())}')
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin')
    with open('hub/hopper-medium-v2/value_function/config.json', 'w') as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
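# A loading sketch: after the script above runs, the dumped folder contains a
# config.json plus diffusion_pytorch_model.bin, so it can plausibly be reloaded
# with (path per the script above; reload behavior is an assumption):
#   from diffusers import UNet1DModel
#   unet = UNet1DModel.from_pretrained('hub/hopper-medium-v2/unet/hor32')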
| 87
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Visual-Attention-Network/van-base': (
        'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
    ),
}
class VanConfig(PretrainedConfig):
    model_type = 'van'

    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 49
|
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    conn.recv = Mock(side_effect=lambda *args, **kwargs: next(f))
    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 105
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
SPIECE_UNDERLINE = "▁"
class _lowercase ( A__ ):
"""simple docstring"""
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = RemBertTokenizer
def __init__(self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="[CLS]" , lowerCamelCase_="[SEP]" , lowerCamelCase_="<unk>" , lowerCamelCase_="[SEP]" , lowerCamelCase_="<pad>" , lowerCamelCase_="[CLS]" , lowerCamelCase_="[MASK]" , **lowerCamelCase_ , ):
"""simple docstring"""
a = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
super().__init__(
__snake_case , tokenizer_file=__snake_case , do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , **__snake_case , )
a = do_lower_case
a = remove_space
a = keep_accents
a = vocab_file
a = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
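# --- Illustrative sketch (not part of the original file) ---------------------
# Layout check for the special-token helpers above, using dummy ids rather than
# a real tokenizer instance (a real one needs sentencepiece.model /
# tokenizer.json on disk). The cls/sep ids below are placeholder values.
cls_id, sep_id = 101, 102  # assumed placeholder ids
ids_a, ids_b = [5, 6, 7], [8, 9]
pair = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]         # build_inputs_with_special_tokens
segments = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)    # create_token_type_ids_from_sequences
mask = [1] + [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]  # get_special_tokens_mask
assert len(pair) == len(segments) == len(mask)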
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        '''simple docstring'''
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride)

    def create_and_check_model(self, config, input_values, labels):
        '''simple docstring'''
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        '''simple docstring'''
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass

    def test_model_common_attributes(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset")
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        '''simple docstring'''
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        '''simple docstring'''
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
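# --- Illustrative sketch (not part of the original file) ---------------------
# The sequence-length arithmetic in ASTModelTester.__init__ is worth isolating:
# the spectrogram is cut into patches along frequency and time, and two extra
# tokens ([CLS] and distillation) are prepended. A standalone restatement:
def ast_seq_length(num_mel_bins: int, max_length: int, patch_size: int, frequency_stride: int, time_stride: int) -> int:
    freq_out = (num_mel_bins - patch_size) // frequency_stride + 1
    time_out = (max_length - patch_size) // time_stride + 1
    return freq_out * time_out + 2  # +2 for the [CLS] and distillation tokens

# With the tester defaults: (16-2)//2+1 = 8 frequency patches and
# (24-2)//2+1 = 12 time patches, so 8 * 12 + 2 = 98 tokens.
assert ast_seq_length(16, 24, 2, 2, 2) == 98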
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
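# --- Illustrative sketch (not part of the original file) ---------------------
# Before running the converter it can help to confirm a checkpoint has the
# top-level structure the script reads ("Downstream", and "Featurizer" when
# weighted layer sum is used). The path below is a placeholder.
import torch

ckpt = torch.load("my_s3prl_ckpt.pt", map_location="cpu")
print(sorted(ckpt.keys()))                     # expect at least "Downstream"
print(sorted(ckpt["Downstream"].keys())[:10])  # names the convert_* functions index into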
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel

logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    '''simple docstring'''
    # save results: clear any stale config/weights before saving
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    '''Compute the entropy of a probability distribution.'''
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    '''Print a 2D tensor, one logged row per layer.'''
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
    '''simple docstring'''
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device)
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    '''simple docstring'''
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)", current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100)

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    '''simple docstring'''
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask)
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True)
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100)
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models")
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.")
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path")
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path")
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory")
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers")
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1")
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy.")
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).")
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step.")
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ))
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ])
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
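# --- Illustrative sketch (not part of the original file) ---------------------
# The core trick in compute_heads_importance: a multiplicative head_mask of
# ones is a no-op in the forward pass, but its gradient measures how sensitive
# the loss is to each head, which serves as an importance score. A minimal,
# model-agnostic sketch of that pattern (the toy closure below stands in for a
# real transformer forward pass):
import torch

n_layers, n_heads = 2, 4
head_mask = torch.ones(n_layers, n_heads, requires_grad=True)

# toy loss that depends on the mask the way attention outputs would
weights = torch.randn(n_layers, n_heads)
loss = ((weights * head_mask) ** 2).sum()
loss.backward()

head_importance = head_mask.grad.abs()  # larger gradient => more influential head
print(head_importance)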
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
                }))

    def _compute(self, predictions, references, min_len=1, max_len=4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid: np.ndarray, source: tuple[int, int], destination: tuple[int, int], allow_diagonal: bool) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
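# --- Illustrative sketch (not part of the original file) ---------------------
# Usage example on a small grid where 1 marks a walkable cell and 0 a wall; the
# only 4-connected path from the top-left to the bottom-right has cost 4.
grid = np.array(
    [
        [1, 1, 1],
        [0, 0, 1],
        [1, 1, 1],
    ]
)
distance, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
print(distance)  # 4.0
print(path)      # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)]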
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self):
        '''simple docstring'''
        super().__init__(None, None)

    def __bool__(self):
        '''simple docstring'''
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size=8, capacity_factor=0.75):
        '''simple docstring'''
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key):
        '''simple docstring'''
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind):
        '''simple docstring'''
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind, key, val):
        '''simple docstring'''
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self):
        '''simple docstring'''
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self):
        '''simple docstring'''
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size):
        '''simple docstring'''
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self):
        '''simple docstring'''
        self._resize(len(self._buckets) * 2)

    def _size_down(self):
        '''simple docstring'''
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key):
        '''simple docstring'''
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key, val):
        '''simple docstring'''
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break
    def __setitem__(self, key, val):
        '''simple docstring'''
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key):
        '''simple docstring'''
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key):
        '''simple docstring'''
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self):
        '''simple docstring'''
        return self._len

    def __iter__(self):
        '''simple docstring'''
        yield from (item.key for item in self._buckets if item)

    def __repr__(self):
        '''simple docstring'''
        val_string = ", ".join(
            f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
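# --- Illustrative sketch (not part of the original file) ---------------------
# Basic usage of the open-addressing map above; deletion leaves a _deleted
# tombstone so later probes can skip over it rather than stopping early.
hm = HashMap()
hm["apple"] = 1
hm["banana"] = 2
hm["apple"] = 10       # overwrite in place, length stays 2
del hm["banana"]       # replaced by the _deleted tombstone
assert len(hm) == 1
assert hm["apple"] == 10
print(hm)              # HashMap(apple: 10)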
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "realm"

    def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320, num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
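# --- Illustrative sketch (not part of the original file) ---------------------
# Instantiation check: the defaults group into encoder, reader, and retrieval
# settings, and any of them can be overridden by keyword.
config = RealmConfig(searcher_beam_size=1000)
print(config.model_type)          # "realm"
print(config.num_candidates)      # 8
print(config.searcher_beam_size)  # 1000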
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    '''simple docstring'''
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)
    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
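# --- Illustrative sketch (not part of the original file) ---------------------
# The pattern above defers heavy framework imports until an attribute is
# actually accessed. Outside of transformers' _LazyModule helper, the same
# idea can be expressed with PEP 562's module-level __getattr__; this is a
# generic illustration, not the _LazyModule implementation itself.
import importlib

_LAZY_ATTRS = {"RoFormerModel": "modeling_roformer"}  # attribute -> submodule

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module("." + _LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")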
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 4, max_size=32 * 6, num_labels=4, mask_feature_size=32):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1]), decoder_config=DetrConfig(
                decoder_ffn_dim=128, num_queries=self.num_queries, decoder_attention_heads=2, d_model=self.mask_feature_size), mask_feature_size=self.mask_feature_size, fpn_feature_size=self.mask_feature_size, num_channels=self.num_channels, num_labels=self.num_labels)

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.mask_feature_size))
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(self, config, pixel_values, pixel_mask, mask_labels, class_labels):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4))
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
        comm_check_on_output(result)
        result = model(
            pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@slow
def lowercase ( self : Optional[int] ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
_UpperCAmelCase = MaskFormerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = (self.model_tester.min_size,) * 2
_UpperCAmelCase = {
"pixel_values": torch.randn((2, 3, *size) , device=snake_case_ ),
"mask_labels": torch.randn((2, 1_0, *size) , device=snake_case_ ),
"class_labels": torch.zeros(2 , 1_0 , device=snake_case_ ).long(),
}
_UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ )
_UpperCAmelCase = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
def lowercase ( self : Dict ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ ).to(snake_case_ )
_UpperCAmelCase = model(**snake_case_ , output_attentions=snake_case_ )
self.assertTrue(outputs.attentions is not None )
def lowercase ( self : int ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
_UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss
loss.backward()
def lowercase ( self : int ):
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
_UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
_UpperCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
_UpperCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice, atol=TOLERANCE)
        )

        expected_slice = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice, atol=TOLERANCE)
        )

        expected_slice = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice, atol=TOLERANCE)
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
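    # A hedged post-processing sketch (not part of the original tests): the image processor
    # also exposes helpers that turn the raw query logits checked above into dense maps,
    # e.g., reusing `outputs` and `image_processor` from test_inference_instance_segmentation_head:
    #
    #   semantic_map = image_processor.post_process_semantic_segmentation(
    #       outputs, target_sizes=[(800, 1088)]
    #   )[0]  # an (800, 1088) tensor of per-pixel class ids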
| 22
| 1
|
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    # scale pixel coordinates onto a resolution-independent 0-1000 grid
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
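# Worked example (illustrative numbers only): for a 200x100 image, the box
# (20, 10, 100, 50) maps to [1000*20//200, 1000*10//100, 1000*100//200, 1000*50//100]
# = [100, 100, 500, 500], i.e. box corners expressed on the 0-1000 grid.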
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
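# Minimal usage sketch (an illustrative example, not part of the original module): it
# presumes pytesseract and the Tesseract binary are installed, and that `img` is an RGB
# numpy array holding a document page.
#
#   words, boxes = apply_tesseract(img, lang=None, tesseract_config="")
#   assert len(words) == len(boxes)  # one normalized [x0, y0, x1, y1] box per word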
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
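# Hedged usage sketch: the class name above is reconstructed from context, so treat this
# as illustrative rather than canonical API documentation.
#
#   from PIL import Image
#   processor = LayoutLMv3ImageProcessor()               # OCR enabled by default (apply_ocr=True)
#   encoding = processor(Image.open("page.png"), return_tensors="pt")
#   encoding["pixel_values"]                             # resized/rescaled/normalized tensor
#   encoding["words"], encoding["boxes"]                 # OCR words + 0-1000 boxes per image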
| 359
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
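    # For reference (a sketch of the math being asserted, not part of the original test):
    # with the "fixed_small" setting, the DDPM posterior variance at step t is
    #     beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
    # which is ~0 at t=0 and approaches beta_end (0.02) at t=999 for the linear
    # schedule -- exactly the endpoints the three asserts above check.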
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 314
| 0
|
"""simple docstring"""
def solution() -> int:
    """Returns the product of the 1st, 10th, 100th, 1000th, 10000th, 100000th and
    1000000th digits of Champernowne's constant 0.123456789101112..."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
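# Background (Project Euler 40): Champernowne's constant is built by concatenating the
# positive integers, so constant[0], constant[9], ..., constant[999999] are its 1st,
# 10th, ..., 1,000,000th decimal digits. Those digits are 1, 1, 5, 3, 7, 2 and 1,
# giving the well-known result 1 * 1 * 5 * 3 * 7 * 2 * 1 = 210.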
if __name__ == "__main__":
print(solution())
| 66
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, 'w', encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 277
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
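    # A quick sanity check of the property above (numbers follow from the defaults):
    # the feature-extractor strides are (5, 2, 2, 2, 2, 2, 2), whose product is
    # 5 * 2**6 = 320, i.e. one encoder frame per 320 input samples -- 20 ms of audio
    # at a 16 kHz sampling rate.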
| 355
|
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "dhaka" , _SCREAMING_SNAKE_CASE : int = 5 ):
__a : Optional[Any] = min(_SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse!
__a : Optional[Any] = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
__a : Tuple = requests.get('https://www.google.com/search' , params=_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE )
__a : Dict = BeautifulSoup(html.text , 'html.parser' )
__a : List[str] = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
__a : Optional[Any] = json.dumps(_SCREAMING_SNAKE_CASE )
__a : List[str] = json.loads(_SCREAMING_SNAKE_CASE )
__a : List[Any] = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , _SCREAMING_SNAKE_CASE , )
if not matched_google_image_data:
return 0
__a : Tuple = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(_SCREAMING_SNAKE_CASE ) , )
__a : Optional[Any] = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , _SCREAMING_SNAKE_CASE , )
for index, fixed_full_res_image in enumerate(_SCREAMING_SNAKE_CASE ):
if index >= max_images:
return index
__a : List[str] = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
__a : Tuple = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
__a : Dict = urllib.request.build_opener()
__a : Union[str, Any] = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(_SCREAMING_SNAKE_CASE )
__a : List[Any] = F"""query_{query.replace(" " , "_" )}"""
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
urllib.request.urlretrieve( # noqa: S310
_SCREAMING_SNAKE_CASE , F"""{path_name}/original_size_img_{index}.jpg""" )
return index
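# Note on usage: this relies on scraping Google's unofficial and frequently changing
# result markup (the AF_initDataCallback payload), so the regexes above can silently
# stop matching at any time; treat a return value of 0 as "layout changed", not
# necessarily "no results".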
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
| 294
| 0
|
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # threshold the logits into a binary mask, then scale to an 8-bit image
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
| 52
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCAmelCase_ = Pipe()
UpperCAmelCase_ = Pipe()
process_array_.append(
Process(
target=snake_case_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCAmelCase_ = temp_rs
UpperCAmelCase_ = temp_rr
for i in range(1 , len(snake_case_ ) - 1 ):
UpperCAmelCase_ = Pipe()
UpperCAmelCase_ = Pipe()
process_array_.append(
Process(
target=snake_case_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCAmelCase_ = temp_rs
UpperCAmelCase_ = temp_rr
process_array_.append(
Process(
target=snake_case_ , args=(
len(snake_case_ ) - 1,
arr[len(snake_case_ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case_ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case_ ) ):
UpperCAmelCase_ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
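# For reference, a single-process odd-even transposition sort that the pipe-based
# version above parallelizes (a sketch added for clarity, not part of the original):
def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        start = 0 if phase % 2 == 0 else 1
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr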
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
UpperCAmelCase_ = list(range(10 , 0 , -1 ) )
print("Initial List" )
print(*snake_case_ )
UpperCAmelCase_ = odd_even_transposition(snake_case_ )
print("Sorted List\n" )
print(*snake_case_ )
if __name__ == "__main__":
main()
| 1
| 0
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/esm2_t6_8M_UR50D': 1024,
'facebook/esm2_t12_35M_UR50D': 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
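# The vocab file is expected to be plain text with one token per line; for the ESM-2
# checkpoints it begins with the special tokens followed by amino-acid letters, roughly:
#   <cls>
#   <pad>
#   <eos>
#   <unk>
#   L
#   A
#   ...
# (illustrative excerpt only; the real file ships with each checkpoint).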
class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
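# Hedged usage sketch (assumes a hosted checkpoint such as "facebook/esm2_t6_8M_UR50D"):
#
#   tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   enc = tokenizer("MKTAYIAKQR")
#   # Although _tokenize only splits on whitespace, registering every vocab entry as a
#   # no-split token (the trie above) is what splits a raw protein string per residue,
#   # so input_ids comes back as [cls_id] + one id per amino acid + [eos_id].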
| 250
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def _a ( ) -> Tuple:
'''simple docstring'''
print('''Welcome!''' )
yield
print('''Bye!''' )
@contextlib.contextmanager
def _a ( ) -> Optional[int]:
'''simple docstring'''
print('''Bonjour!''' )
yield
print('''Au revoir!''' )
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 250
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 86
|
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)

        self.assertEqual(decoded_text, expected_truncated_text)
    # tokenizer does not support pretokenized inputs, so this common test is skipped
    def test_pretokenized_inputs(self, *args, **kwargs):
        pass
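

# Hedged usage sketch (illustrative, not part of the test class above):
# `truncate_before_pattern` lets `decode` cut a CodeGen completion at the first
# regex match, e.g. at a trailing comment or a run of blank lines, as exercised
# by the slow truncation test:
#
#     tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#     ids = tokenizer.encode("def add(a, b):\n    return a + b\n\n\n\n# done")
#     print(tokenizer.decode(ids, truncate_before_pattern=["^#", "\n\n\n"]))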
| 64
| 0
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is kept in the raw text, so there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string with SentencePiece, splitting digit/comma pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
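

if __name__ == "__main__":
    # Hedged sketch: the special-token layout the two methods above produce for a
    # sequence pair, using made-up token ids (2 and 3 stand in for the real
    # [CLS]/[SEP] ids, which depend on the loaded SentencePiece model).
    cls_id, sep_id = 2, 3
    ids_a, ids_b = [10, 11], [20, 21, 22]
    pair = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    segment_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    assert len(pair) == len(segment_ids) == 8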
| 352
|
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily merge adjacent (source, target) pairs while both sides fit in max_tokens."""
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    """Pack the train split; val/test are copied through unchanged."""
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))

    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
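

def _demo_pack_examples():
    # Hedged, self-contained sketch (not part of the original script): shows how
    # pack_examples greedily merges adjacent (src, tgt) pairs until either side
    # would exceed max_tokens. The whitespace "tokenizer" below is an assumption
    # made only so the demo runs without downloading a real tokenizer.
    class _Shape:
        def __init__(self, n):
            self.shape = (1, n)

    class _Enc:
        def __init__(self, n):
            self.input_ids = _Shape(n)

    def whitespace_tok(text, return_tensors="pt"):
        return _Enc(len(text.split()))

    src = ["a b", "c d e", "f"]
    tgt = ["x", "y z", "w"]
    packed_src, packed_tgt = pack_examples(whitespace_tok, src, tgt, max_tokens=4)
    assert packed_src == ["a b", "c d e f"]
    assert packed_tgt == ["x", "y z w"]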
| 98
| 0
|
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : List[str] ):
snake_case_ : Optional[Any] = 0
@slow
def _snake_case ( self : int ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(snake_case__ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(snake_case__ ) , 0 )
    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
def _snake_case ( self : str ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(snake_case__ , '''vocab.txt''' ) )
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type='''bert''' , use_fast=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(snake_case__ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(snake_case__ , '''merges.txt''' ) )
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type='''gpt2''' , use_fast=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@require_tokenizers
def _snake_case ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(snake_case__ , '''vocab.txt''' ) )
snake_case_ : int = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type='''bert''' )
self.assertIsInstance(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(snake_case__ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(snake_case__ , '''merges.txt''' ) )
snake_case_ : int = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type='''gpt2''' )
self.assertIsInstance(snake_case__ , snake_case__ )
def _snake_case ( self : int ):
with pytest.raises(snake_case__ ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def _snake_case ( self : Dict ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
snake_case_ : Union[str, Any] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
if isinstance(snake_case__ , snake_case__ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , snake_case__ )
else:
self.assertEqual(tokenizer.do_lower_case , snake_case__ )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _snake_case ( self : int ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
snake_case__ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
snake_case_ : Tuple = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def _snake_case ( self : Optional[int] ):
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)
            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)
@require_tokenizers
def _snake_case ( self : List[str] ):
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=snake_case__ ) , snake_case__ )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , snake_case__ )
@require_tokenizers
def _snake_case ( self : Optional[int] ):
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=snake_case__ )
snake_case_ : Dict = '''Hello, world. How are you?'''
snake_case_ : int = tokenizer.tokenize(snake_case__ )
self.assertEqual('''[UNK]''' , tokens[0] )
snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=snake_case__ )
snake_case_ : str = tokenizer.tokenize(snake_case__ )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def _snake_case ( self : Dict ):
snake_case_ : str = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(snake_case__ ) , snake_case__ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def _snake_case ( self : Union[str, Any] ):
snake_case_ : str = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _snake_case ( self : Any ):
snake_case_ : Tuple = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(snake_case__ , snake_case__ )
def _snake_case ( self : Tuple ):
snake_case_ : str = get_tokenizer_config('''bert-base-cased''' )
snake_case_ : Any = config.pop('''_commit_hash''' , snake_case__ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(snake_case__ , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
snake_case_ : Dict = get_tokenizer_config(snake_case__ )
self.assertDictEqual(snake_case__ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
snake_case_ : List[str] = AutoTokenizer.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
snake_case_ : Dict = get_tokenizer_config(snake_case__ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def _snake_case ( self : Optional[int] ):
try:
AutoConfig.register('''custom''' , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
snake_case_ : Tuple = CustomTokenizer.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
snake_case_ : str = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _snake_case ( self : Dict ):
try:
AutoConfig.register('''custom''' , snake_case__ )
# Can register in two steps
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
snake_case__ , slow_tokenizer_class=snake_case__ , fast_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
        # We pass through a BertTokenizerFast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : int = BertTokenizerFast.from_pretrained(snake_case__ )
bert_tokenizer.save_pretrained(snake_case__ )
snake_case_ : Dict = CustomTokenizerFast.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
snake_case_ : Tuple = AutoTokenizer.from_pretrained(snake_case__ , use_fast=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _snake_case ( self : List[str] ):
with self.assertRaises(snake_case__ ):
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
snake_case_ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=snake_case__ )
snake_case_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained(snake_case__ , trust_remote_code=snake_case__ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
snake_case_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
snake_case_ : int = AutoTokenizer.from_pretrained(snake_case__ , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def _snake_case ( self : int ):
class _UpperCAmelCase ( a_):
_lowerCAmelCase : str = False
class _UpperCAmelCase ( a_):
_lowerCAmelCase : List[Any] = NewTokenizer
_lowerCAmelCase : List[Any] = False
try:
AutoConfig.register('''custom''' , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
# If remote code is not set, the default is to use local
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
snake_case_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
snake_case_ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _snake_case ( self : Tuple ):
snake_case_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def _snake_case ( self : int ):
with self.assertRaisesRegex(
snake_case__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
snake_case_ : List[Any] = AutoTokenizer.from_pretrained('''bert-base''' )
def _snake_case ( self : int ):
with self.assertRaisesRegex(
snake_case__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained(snake_case__ , revision='''aaaaaa''' )
def _snake_case ( self : List[str] ):
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
snake_case_ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
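

if __name__ == "__main__":
    # Hedged sketch (not a test): the register/clean-up pattern the tests above
    # rely on. Registration mutates the global mappings, so the `finally` block
    # mirrors the tests and removes the custom entries again.
    try:
        AutoConfig.register("custom", CustomConfig)
        AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
        assert TOKENIZER_MAPPING[CustomConfig] == (CustomTokenizer, None)
    finally:
        if "custom" in CONFIG_MAPPING._extra_content:
            del CONFIG_MAPPING._extra_content["custom"]
        if CustomConfig in TOKENIZER_MAPPING._extra_content:
            del TOKENIZER_MAPPING._extra_content[CustomConfig]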
| 264
|
def rank_of_matrix(matrix):
    """Return the rank of `matrix`, computed in place via Gaussian elimination."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Stay on the same row; a plain `for` loop would ignore this
            # decrement, hence the `while` loop above.
            row -= 1
        row += 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
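
    # Hedged usage sketch: the third row below is the sum of the first two,
    # so the matrix has rank 2.
    example = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [5.0, 7.0, 9.0]]
    assert rank_of_matrix(example) == 2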
| 128
| 0
|
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    """Build the id -> class-name metadata dict from a hub-hosted class-info JSON file."""
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
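

# Hedged illustration of the metadata layout returned above (class ids and names
# are made-up stand-ins, not real ADE20K entries):
#
#     {"0": "wall", "1": "person", "thing_ids": [1], "class_names": ["wall", "person"]}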
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4

        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
    def test_call_pil(self):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(high)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
    def test_call_with_segmentation_maps(self):
'''simple docstring'''
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensures padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
'''simple docstring'''
        # Mask values reconstructed from the assertions below: a single run of 45
        # ones starting at (1-indexed, row-major flattened) pixel 21, plus one more
        # run; the exact extent of the second run is not pinned down by the asserts.
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
    def test_post_process_semantic_segmentation(self):
        '''simple docstring'''
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        '''simple docstring'''
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        '''simple docstring'''
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
| 356
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into lower @ upper."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
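
    # Hedged usage sketch: verify the factorization on a small matrix chosen so
    # that no zero pivot appears.
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)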
| 92
| 0
|
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class snake_case :
"""simple docstring"""
def __init__( self : int , __A : str , __A : Tuple=2 , __A : Optional[Any]=3 , __A : List[str]=4 , __A : Union[str, Any]=2 , __A : str=7 , __A : Tuple=True , __A : List[Any]=True , __A : Tuple=True , __A : int=True , __A : List[Any]=9_9 , __A : Any=3_6 , __A : Any=3 , __A : str=4 , __A : Tuple=3_7 , __A : Union[str, Any]="gelu" , __A : Tuple=0.1 , __A : int=0.1 , __A : Tuple=5_1_2 , __A : List[Any]=1_6 , __A : Any=2 , __A : Optional[int]=0.02 , __A : Any=6 , __A : Optional[Any]=6 , __A : Tuple=3 , __A : Optional[int]=4 , __A : str=None , __A : Tuple=1_0_0_0 , ):
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = num_channels
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = text_seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = coordinate_size
__UpperCamelCase = shape_size
__UpperCamelCase = num_labels
__UpperCamelCase = num_choices
__UpperCamelCase = scope
__UpperCamelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__UpperCamelCase = text_seq_length
__UpperCamelCase = (image_size // patch_size) ** 2 + 1
__UpperCamelCase = self.text_seq_length + self.image_seq_length
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__UpperCamelCase = bbox[i, j, 3]
__UpperCamelCase = bbox[i, j, 1]
__UpperCamelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__UpperCamelCase = bbox[i, j, 2]
__UpperCamelCase = bbox[i, j, 0]
__UpperCamelCase = t
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__UpperCamelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _lowerCamelCase ( self : Any , __A : int , __A : str , __A : Optional[int] , __A : int , __A : Tuple , __A : List[Any] , __A : List[str] , __A : List[str] ):
__UpperCamelCase = LayoutLMvaModel(config=__A )
model.to(__A )
model.eval()
# text + image
__UpperCamelCase = model(__A , pixel_values=__A )
__UpperCamelCase = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A )
__UpperCamelCase = model(__A , bbox=__A , pixel_values=__A , token_type_ids=__A )
__UpperCamelCase = model(__A , bbox=__A , pixel_values=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__UpperCamelCase = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__UpperCamelCase = model(pixel_values=__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _lowerCamelCase ( self : List[str] , __A : Union[str, Any] , __A : List[Any] , __A : Any , __A : List[Any] , __A : Optional[int] , __A : List[str] , __A : int , __A : List[Any] ):
__UpperCamelCase = self.num_labels
__UpperCamelCase = LayoutLMvaForSequenceClassification(__A )
model.to(__A )
model.eval()
__UpperCamelCase = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : List[str] , __A : Dict , __A : Tuple , __A : int , __A : Optional[int] , __A : List[str] , __A : str , __A : Dict , __A : List[str] ):
__UpperCamelCase = self.num_labels
__UpperCamelCase = LayoutLMvaForTokenClassification(config=__A )
model.to(__A )
model.eval()
__UpperCamelCase = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _lowerCamelCase ( self : Tuple , __A : Dict , __A : Dict , __A : List[str] , __A : Tuple , __A : List[str] , __A : Dict , __A : int , __A : Optional[int] ):
__UpperCamelCase = LayoutLMvaForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
__UpperCamelCase = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict =False
SCREAMING_SNAKE_CASE_ : str =False
SCREAMING_SNAKE_CASE_ : int =False
SCREAMING_SNAKE_CASE_ : Any =(
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : int =(
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self : Optional[Any] , __A : str , __A : Dict , __A : Optional[int] , __A : Union[str, Any] , __A : Union[str, Any] ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _lowerCamelCase ( self : int ):
__UpperCamelCase = LayoutLMvaModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__A , hidden_size=3_7 )
def _lowerCamelCase ( self : str , __A : Dict , __A : Optional[int] , __A : Tuple=False ):
__UpperCamelCase = copy.deepcopy(__A )
if model_class in get_values(__A ):
__UpperCamelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(__A , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__A ):
__UpperCamelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__A )
elif model_class in get_values(__A ):
__UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
__UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
elif model_class in [
*get_values(__A ),
]:
__UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
elif model_class in [
*get_values(__A ),
]:
__UpperCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__A , )
return inputs_dict
def _lowerCamelCase ( self : Optional[int] ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : str ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCamelCase = type
self.model_tester.create_and_check_model(*__A )
def _lowerCamelCase ( self : Tuple ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def _lowerCamelCase ( self : str ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
@slow
def _lowerCamelCase ( self : List[str] ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = LayoutLMvaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowercase__ ( ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCamelCase ( self : List[str] ):
return LayoutLMvaImageProcessor(apply_ocr=__A ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self : str ):
__UpperCamelCase = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(__A )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__A , return_tensors='pt' ).pixel_values.to(__A )
__UpperCamelCase = torch.tensor([[1, 2]] )
__UpperCamelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
__UpperCamelCase = model(
input_ids=input_ids.to(__A ) , bbox=bbox.to(__A ) , pixel_values=pixel_values.to(__A ) , )
# verify the logits
__UpperCamelCase = torch.Size((1, 1_9_9, 7_6_8) )
self.assertEqual(outputs.last_hidden_state.shape , __A )
__UpperCamelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(__A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1e-4 ) )
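

# Hedged helper (an illustrative addition, not part of the tests above):
# LayoutLM-family models expect word bounding boxes rescaled to a 0-1000 grid,
# which is what the hard-coded `bbox` tensor in the integration test stands in for.
def normalize_bbox(bbox, width, height):
    x0, y0, x1, y1 = bbox
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]


if __name__ == "__main__":
    # Example: a 100x200-pixel page with a word box at (10, 20, 30, 40)
    assert normalize_bbox([10, 20, 30, 40], width=100, height=200) == [100, 100, 300, 200]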
| 53
|
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ) -> None:
    """simple docstring"""
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['model_config'] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='cpu' )
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )
    tokenizer = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('<ent>' , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken('<ent2>' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_1, entity_token_2]} )
    config.vocab_size += 2
    print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
        json.dump(entity_vocab , f )
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, ent2_emb] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F'''encoder.layer.{layer_index}.attention.self.'''
            state_dict[prefix + 'w2e_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2w_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2e_' + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_emb[entity_vocab['[MASK2]']] = entity_emb[entity_vocab['[MASK]']]
    model = LukeModel(config=config ).eval()
    missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(F'''Missing keys {', '.join(missing_keys )}. Expected only missing embeddings.position_ids''' )
    if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
        raise ValueError(
            'Unexpected keys'
            F''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}''' )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='entity_classification' )
    text = (
        'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
        ' new world number one avoid a humiliating second- round exit at Wimbledon .'
    )
    span = (39, 42)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors='pt' )
    outputs = model(**encoding )
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024) )
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
    else:  # base
        expected_shape = torch.Size((1, 42, 768) )
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024) )
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            F''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_entity_vocab( entity_vocab_path ) -> dict:
    """simple docstring"""
    entity_vocab = {}
    with open(entity_vocab_path , 'r' , encoding='utf-8' ) as f:
        for index, line in enumerate(f ):
            title, _ = line.rstrip().split('\t' )
            entity_vocab[title] = index
    return entity_vocab
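# Usage sketch (hypothetical temp file) for the TSV entity vocabulary consumed
# by load_entity_vocab above: each line is "<entity title>\t<count>", and the
# zero-based line index becomes the entity id.
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.tsv', delete=False) as tmp:
    tmp.write('[PAD]\t0\n[UNK]\t0\n[MASK]\t0\n[MASK2]\t0\nAna Ivanovic\t123\n')
demo_vocab = load_entity_vocab(tmp.name)
assert demo_vocab == {'[PAD]': 0, '[UNK]': 1, '[MASK]': 2, '[MASK2]': 3, 'Ana Ivanovic': 4}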
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 53
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig ):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__( self , vocab_size=65024 , hidden_size=4544 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def head_dim(self ) -> int:
        return self.hidden_size // self.num_attention_heads
    @property
    def rotary(self ) -> bool:
        return not self.alibi
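# A quick usage sketch of the config above: head_dim derives from hidden_size
# and num_attention_heads, so the 7B-style defaults (4544 hidden units split
# across 71 heads) give 64-dimensional attention heads.
config = FalconConfig()  # defaults as defined above
assert config.head_dim == 64  # 4544 // 71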
| 222
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig ):
    model_type = "biogpt"
    def __init__( self , vocab_size=42384 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 222
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class MBart50Tokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self, vocab_file, src_lang=None, tgt_lang=None, eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs = None, **kwargs, ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs['additional_special_tokens'] = kwargs.get('additional_special_tokens', [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory, filename_prefix = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def get_special_tokens_mask(self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1 = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts, src_lang = "en_XX", tgt_texts = None, tgt_lang = "ro_RO", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens(self, tgt_lang) -> None:
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
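# A minimal standalone sketch of the fairseq/spm id alignment implemented above
# (names are illustrative): spm reserves ids 0-2 for <unk>/<s>/</s>, while
# fairseq prepends <s>/<pad>/</s>/<unk>, so every "real" spm piece id shifts by
# a fixed offset of 1 and the four specials are remapped explicitly.
fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
fairseq_offset = 1

def spm_piece_to_fairseq_id(spm_id: int) -> int:
    # spm id 0 is its own <unk>; everything else shifts by the offset
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids['<unk>']

assert spm_piece_to_fairseq_id(3) == 4  # the spm piece ',' lands at fairseq id 4
assert spm_piece_to_fairseq_id(0) == 3  # unknown pieces map to fairseq <unk>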
| 69
|
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force( magnitude , angle , radian_mode = False )-> list[float]:
    '''simple docstring'''
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium( forces , location , eps = 10**-1 )-> bool:
    '''simple docstring'''
    moments: NDArray[float64] = cross(location , forces )
    sum_moments: float = sum(moments )
    return abs(sum_moments ) < eps
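# A quick sanity check of polar_force (assumed convention: angles are degrees
# unless radian_mode=True): a 10 N force at 90 degrees decomposes to roughly (0, 10).
fx, fy = polar_force(10.0, 90)
assert abs(fx) < 1e-9 and abs(fy - 10.0) < 1e-9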
if __name__ == "__main__":
# Test to check if it works
    forces: NDArray[float64] = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 215
| 0
|
import argparse
import os
import re
PATH_TO_AUTO_MODULE = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'''\s*\(\s*"(\S[^"]+)"''')
def sort_auto_mapping( fname, overwrite = False ):
    with open(fname, "r", encoding="utf-8" ) as f:
        content = f.read()
    lines = content.split("\n" )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "(" ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")" ):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                    line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda block : _re_identifier.search(block ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname, "w", encoding="utf-8" ) as f:
            f.write("\n".join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings( overwrite = False ):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith(".py" )]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames, diffs ) if d]
        raise ValueError(
            f'''The following files have auto mappings that need sorting: {', '.join(failures )}. Run `make style` to fix'''
            " this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
    sort_all_auto_mappings(not args.check_only)
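# A small illustration (synthetic input) of what the two regexes above match:
# _re_intro_mapping finds the start of an OrderedDict mapping assignment, and
# _re_identifier extracts the quoted model identifier used as the sort key.
import re

_intro = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
_ident = re.compile(r'\s*\(\s*"(\S[^"]+)"')
assert _intro.search('MODEL_MAPPING_NAMES = OrderedDict(') is not None
assert _ident.search('        ("albert", "AlbertModel"),').groups()[0] == 'albert'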
| 342
|
def longest_distance( graph ) -> None:
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
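# Worked check: in the sample DAG above the longest chain is 0 -> 2 -> 5 -> 6 -> 7,
# i.e. 5 vertices, so the call prints 5. A second, minimal example (hypothetical
# three-node chain) using the same vertex-counting convention:
longest_distance({0: [1], 1: [2], 2: []})  # prints 3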
| 342
| 1
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int]=3 , lowerCamelCase__ : Any=32 , lowerCamelCase__ : List[str]=3 , lowerCamelCase__ : Optional[Any]=10 , lowerCamelCase__ : List[str]=[10, 20, 30, 40] , lowerCamelCase__ : Optional[Any]=[1, 1, 2, 1] , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : List[Any]="relu" , lowerCamelCase__ : str=3 , lowerCamelCase__ : Tuple=None , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = parent
_UpperCAmelCase : Any = batch_size
_UpperCAmelCase : Any = image_size
_UpperCAmelCase : Any = num_channels
_UpperCAmelCase : Dict = embeddings_size
_UpperCAmelCase : List[Any] = hidden_sizes
_UpperCAmelCase : List[Any] = depths
_UpperCAmelCase : Union[str, Any] = is_training
_UpperCAmelCase : Any = use_labels
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : Union[str, Any] = num_labels
_UpperCAmelCase : Dict = scope
_UpperCAmelCase : str = len(lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : Any = None
if self.use_labels:
_UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : int = TFResNetModel(config=lowerCamelCase__ )
_UpperCAmelCase : Tuple = model(lowerCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = self.num_labels
_UpperCAmelCase : Dict = TFResNetForImageClassification(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase : List[str] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowerCAmelCase : Optional[Any] = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase : str = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Tuple = False
lowerCAmelCase : Tuple = False
lowerCAmelCase : Any = False
def lowerCAmelCase__ ( self : Dict ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = TFResNetModelTester(self )
_UpperCAmelCase : str = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__ ( self : str ) ->List[str]:
'''simple docstring'''
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Any ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def lowerCAmelCase__ ( self : Any ) ->Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : List[str] = [*signature.parameters.keys()]
_UpperCAmelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->str:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Any ):
_UpperCAmelCase : Tuple = model_class(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_UpperCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Optional[int] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCAmelCase : List[Any] = layer_type
_UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Union[str, Any] = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->Any:
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Any = TFResNetModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase__ ( self : Dict ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_UpperCAmelCase : str = self.default_image_processor
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : int = image_processor(images=lowerCamelCase__ , return_tensors="tf" )
# forward pass
_UpperCAmelCase : List[str] = model(**lowerCamelCase__ )
# verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
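# A minimal inference sketch (assuming the public checkpoint "microsoft/resnet-50",
# the first entry of the archive list used above): same flow as the integration
# test, but reporting the predicted ImageNet label instead of reference logits.
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits
predicted_class = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class])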
| 234
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict = logging.get_logger(__name__)
_A : Union[str, Any] = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig ):
    model_type = "vit_msn"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
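# A quick derived-quantity check for the config above: with the default 224x224
# input and 16x16 patches, the encoder sees (224 / 16) ** 2 = 196 patch tokens.
config = ViTMSNConfig()
assert (config.image_size // config.patch_size) ** 2 == 196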
| 142
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC ):
    def __init__( self , path_or_paths = None , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ):
        '''simple docstring'''
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths , dict ) else '''train'''
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
    @abstractmethod
    def read(self ):
        '''simple docstring'''
        pass
class AbstractDatasetInputStream(ABC ):
    def __init__( self , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ):
        '''simple docstring'''
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
    @abstractmethod
    def read(self ):
        '''simple docstring'''
        pass
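# A minimal sketch (hypothetical subclass) of how a concrete reader fills in the
# abstract read() method above: stash constructor arguments via the base class,
# then materialize a Dataset inside read(). Here path_or_paths is treated as an
# already-loaded list of example dicts purely for illustration.
class InMemoryListReader(AbstractDatasetReader):
    def read(self):
        return Dataset.from_list(self.path_or_paths)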
| 369
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        '''simple docstring'''
        size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self ):
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self , image_inputs , batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
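# Worked example of the shortest-edge rule implemented above (assumed size
# config {'shortest_edge': 18}): a 30x60 (h x w) image resizes so the short
# side becomes 18, giving height 18 and width 36 -- aspect ratio is preserved.
def shortest_edge_resize(h: int, w: int, shortest_edge: int = 18) -> tuple:
    # hypothetical standalone helper mirroring the method logic above
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

assert shortest_edge_resize(30, 60) == (18, 36)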
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp(self ):
        '''simple docstring'''
        self.image_processor_tester = DetaImageProcessingTester(self )
    @property
    def image_processor_dict(self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''do_rescale''' ) )
self.assertTrue(hasattr(_A , '''do_pad''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} )
self.assertEqual(image_processor.do_pad , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processor_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase__ : Union[str, Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase__ : str = json.loads(f.read() )
UpperCAmelCase__ : Tuple = {'''image_id''': 39_769, '''annotations''': target}
# encode them
UpperCAmelCase__ : Optional[int] = DetaImageProcessor()
UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase__ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
UpperCAmelCase__ : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
        UpperCAmelCase__ : List[Any] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
UpperCAmelCase__ : int = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
UpperCAmelCase__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
UpperCAmelCase__ : str = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
UpperCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
UpperCAmelCase__ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify orig_size
UpperCAmelCase__ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
UpperCAmelCase__ : int = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase__ : int = json.loads(f.read() )
UpperCAmelCase__ : str = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
UpperCAmelCase__ : Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCAmelCase__ : Any = DetaImageProcessor(format='''coco_panoptic''' )
UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
UpperCAmelCase__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
        UpperCAmelCase__ : Any = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
UpperCAmelCase__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
UpperCAmelCase__ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
UpperCAmelCase__ : Optional[int] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
UpperCAmelCase__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
UpperCAmelCase__ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify masks
UpperCAmelCase__ : Dict = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A )
# verify orig_size
UpperCAmelCase__ : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
UpperCAmelCase__ : Optional[Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
| 299
| 0
|