| code (string, 82–54.1k chars) | code_codestyle (int64, 0–699) | style_context (string, 111–35.6k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : int = 5_0257 , UpperCAmelCase__ : List[str] = 1024 , UpperCAmelCase__ : List[str] = 768 , UpperCAmelCase__ : str = 12 , UpperCAmelCase__ : Optional[Any] = 12 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : List[str] = "gelu_new" , UpperCAmelCase__ : int = 0.1 , UpperCAmelCase__ : str = 0.1 , UpperCAmelCase__ : str = 0.1 , UpperCAmelCase__ : Optional[int] = 1e-5 , UpperCAmelCase__ : Any = 0.02 , UpperCAmelCase__ : int = True , UpperCAmelCase__ : Union[str, Any] = True , UpperCAmelCase__ : int = False , UpperCAmelCase__ : Tuple = False , ) ->str:
super().__init__()
UpperCAmelCase_ = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
UpperCAmelCase_ = prefix_inner_dim
UpperCAmelCase_ = prefix_hidden_dim
UpperCAmelCase_ = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
UpperCAmelCase_ = (
nn.Linear(self.prefix_hidden_dim , _lowerCamelCase ) if self.prefix_hidden_dim is not None else nn.Identity()
)
UpperCAmelCase_ = GPTaConfig(
vocab_size=_lowerCamelCase , n_positions=_lowerCamelCase , n_embd=_lowerCamelCase , n_layer=_lowerCamelCase , n_head=_lowerCamelCase , n_inner=_lowerCamelCase , activation_function=_lowerCamelCase , resid_pdrop=_lowerCamelCase , embd_pdrop=_lowerCamelCase , attn_pdrop=_lowerCamelCase , layer_norm_epsilon=_lowerCamelCase , initializer_range=_lowerCamelCase , scale_attn_weights=_lowerCamelCase , use_cache=_lowerCamelCase , scale_attn_by_inverse_layer_idx=_lowerCamelCase , reorder_and_upcast_attn=_lowerCamelCase , )
UpperCAmelCase_ = GPTaLMHeadModel(_lowerCamelCase )
def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] = None , UpperCAmelCase__ : Optional[int] = None , ) ->int:
UpperCAmelCase_ = self.transformer.transformer.wte(_lowerCamelCase )
UpperCAmelCase_ = self.encode_prefix(_lowerCamelCase )
UpperCAmelCase_ = self.decode_prefix(_lowerCamelCase )
UpperCAmelCase_ = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
UpperCAmelCase_ = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
UpperCAmelCase_ = torch.cat((dummy_token, input_ids) , dim=1 )
UpperCAmelCase_ = self.transformer(inputs_embeds=_lowerCamelCase , labels=_lowerCamelCase , attention_mask=_lowerCamelCase )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) ->Union[str, Any]:
return torch.zeros(_lowerCamelCase , self.prefix_length , dtype=torch.intaa , device=_lowerCamelCase )
def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : Optional[Any] ) ->Any:
return self.encode_prefix(_lowerCamelCase )
@torch.no_grad()
def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] ) ->Dict:
UpperCAmelCase_ = torch.split(_lowerCamelCase , 1 , dim=0 )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for feature in features:
UpperCAmelCase_ = self.decode_prefix(feature.to(_lowerCamelCase ) ) # back to the clip feature
# Only support beam search for now
UpperCAmelCase_ = self.generate_beam(
input_embeds=_lowerCamelCase , device=_lowerCamelCase , eos_token_id=_lowerCamelCase )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
UpperCAmelCase_ = torch.stack(_lowerCamelCase )
UpperCAmelCase_ = torch.stack(_lowerCamelCase )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : str=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[Any] = 5 , UpperCAmelCase__ : Dict = 67 , UpperCAmelCase__ : Tuple = 1.0 , UpperCAmelCase__ : Optional[int] = None , ) ->Optional[Any]:
UpperCAmelCase_ = eos_token_id
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = torch.ones(_lowerCamelCase , device=_lowerCamelCase , dtype=torch.int )
UpperCAmelCase_ = torch.zeros(_lowerCamelCase , device=_lowerCamelCase , dtype=torch.bool )
if input_embeds is not None:
UpperCAmelCase_ = input_embeds
else:
UpperCAmelCase_ = self.transformer.transformer.wte(_lowerCamelCase )
for i in range(_lowerCamelCase ):
UpperCAmelCase_ = self.transformer(inputs_embeds=_lowerCamelCase )
UpperCAmelCase_ = outputs.logits
UpperCAmelCase_ = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
UpperCAmelCase_ = logits.softmax(-1 ).log()
if scores is None:
UpperCAmelCase_ = logits.topk(_lowerCamelCase , -1 )
UpperCAmelCase_ = generated.expand(_lowerCamelCase , *generated.shape[1:] )
UpperCAmelCase_ = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
UpperCAmelCase_ = next_tokens
else:
UpperCAmelCase_ = tokens.expand(_lowerCamelCase , *tokens.shape[1:] )
UpperCAmelCase_ = torch.cat((tokens, next_tokens) , dim=1 )
else:
UpperCAmelCase_ = -float(np.inf )
UpperCAmelCase_ = 0
UpperCAmelCase_ = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
UpperCAmelCase_ = scores_sum / seq_lengths[:, None]
UpperCAmelCase_ = scores_sum_average.view(-1 ).topk(_lowerCamelCase , -1 )
UpperCAmelCase_ = next_tokens // scores_sum.shape[1]
UpperCAmelCase_ = seq_lengths[next_tokens_source]
UpperCAmelCase_ = next_tokens % scores_sum.shape[1]
UpperCAmelCase_ = next_tokens.unsqueeze(1 )
UpperCAmelCase_ = tokens[next_tokens_source]
UpperCAmelCase_ = torch.cat((tokens, next_tokens) , dim=1 )
UpperCAmelCase_ = generated[next_tokens_source]
UpperCAmelCase_ = scores_sum_average * seq_lengths
UpperCAmelCase_ = is_stopped[next_tokens_source]
UpperCAmelCase_ = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
UpperCAmelCase_ = torch.cat((generated, next_token_embed) , dim=1 )
UpperCAmelCase_ = is_stopped + next_tokens.eq(_lowerCamelCase ).squeeze()
if is_stopped.all():
break
UpperCAmelCase_ = scores / seq_lengths
UpperCAmelCase_ = scores.argsort(descending=_lowerCamelCase )
# tokens tensors are already padded to max_seq_length
UpperCAmelCase_ = [tokens[i] for i in order]
UpperCAmelCase_ = torch.stack(_lowerCamelCase , dim=0 )
UpperCAmelCase_ = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 390 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> int:
# Load configuration defined in the metadata file
with open(UpperCAmelCase__ ) as metadata_file:
UpperCamelCase_: Tuple = json.load(UpperCAmelCase__ )
UpperCamelCase_: List[str] = LukeConfig(use_entity_aware_attention=UpperCAmelCase__ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
UpperCamelCase_: Optional[int] = torch.load(UpperCAmelCase__ , map_location='cpu' )['module']
# Load the entity vocab file
UpperCamelCase_: Any = load_original_entity_vocab(UpperCAmelCase__ )
# add an entry for [MASK2]
UpperCamelCase_: List[str] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
UpperCamelCase_: Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCamelCase_: Any = AddedToken('<ent>' , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ )
UpperCamelCase_: Optional[Any] = AddedToken('<ent2>' , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__ , 'tokenizer_config.json' ) , 'r' ) as f:
UpperCamelCase_: Union[str, Any] = json.load(UpperCAmelCase__ )
UpperCamelCase_: str = 'MLukeTokenizer'
with open(os.path.join(UpperCAmelCase__ , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__ , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: Optional[Any] = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
# Initialize the embeddings of the special tokens
UpperCamelCase_: Any = tokenizer.convert_tokens_to_ids(['@'] )[0]
UpperCamelCase_: List[str] = tokenizer.convert_tokens_to_ids(['#'] )[0]
UpperCamelCase_: Tuple = state_dict['embeddings.word_embeddings.weight']
UpperCamelCase_: int = word_emb[ent_init_index].unsqueeze(0 )
UpperCamelCase_: Any = word_emb[enta_init_index].unsqueeze(0 )
UpperCamelCase_: str = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
UpperCamelCase_: Union[str, Any] = state_dict[bias_name]
UpperCamelCase_: Tuple = decoder_bias[ent_init_index].unsqueeze(0 )
UpperCamelCase_: Any = decoder_bias[enta_init_index].unsqueeze(0 )
UpperCamelCase_: Optional[Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCamelCase_: List[Any] = F'''encoder.layer.{layer_index}.attention.self.'''
UpperCamelCase_: str = state_dict[prefix + matrix_name]
UpperCamelCase_: Dict = state_dict[prefix + matrix_name]
UpperCamelCase_: Dict = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCamelCase_: List[str] = state_dict['entity_embeddings.entity_embeddings.weight']
UpperCamelCase_: int = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
UpperCamelCase_: Tuple = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
UpperCamelCase_: Optional[Any] = state_dict['entity_predictions.bias']
UpperCamelCase_: List[str] = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
UpperCamelCase_: List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
UpperCamelCase_: List[Any] = LukeForMaskedLM(config=UpperCAmelCase__ ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
UpperCamelCase_: Optional[Any] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
UpperCamelCase_: Union[str, Any] = state_dict[key]
else:
UpperCamelCase_: Dict = state_dict[key]
UpperCamelCase_ ,UpperCamelCase_: Optional[int] = model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
if set(UpperCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(UpperCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
UpperCamelCase_: Optional[int] = MLukeTokenizer.from_pretrained(UpperCAmelCase__ , task='entity_classification' )
UpperCamelCase_: Tuple = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
UpperCamelCase_: Optional[int] = (0, 9)
UpperCamelCase_: Union[str, Any] = tokenizer(UpperCAmelCase__ , entity_spans=[span] , return_tensors='pt' )
UpperCamelCase_: str = model(**UpperCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase_: int = torch.Size((1, 3_3, 7_6_8) )
UpperCamelCase_: Tuple = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase_: Dict = torch.Size((1, 1, 7_6_8) )
UpperCamelCase_: Tuple = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
UpperCamelCase_: str = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
UpperCamelCase_: int = 'Tokyo is the capital of <mask>.'
UpperCamelCase_: Dict = (2_4, 3_0)
UpperCamelCase_: int = tokenizer(UpperCAmelCase__ , entity_spans=[span] , return_tensors='pt' )
UpperCamelCase_: Dict = model(**UpperCAmelCase__ )
UpperCamelCase_: Optional[int] = encoding['input_ids'][0].tolist()
UpperCamelCase_: List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
UpperCamelCase_: Any = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCAmelCase__ )
UpperCamelCase_: Union[str, Any] = outputs.entity_logits[0][0].argmax().item()
UpperCamelCase_: Optional[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(UpperCAmelCase__ ) )
model.save_pretrained(UpperCAmelCase__ )
def snake_case (UpperCAmelCase__ ) -> int:
UpperCamelCase_: Optional[Any] = ['[MASK]', '[PAD]', '[UNK]']
UpperCamelCase_: Any = [json.loads(UpperCAmelCase__ ) for line in open(UpperCAmelCase__ )]
UpperCamelCase_: Tuple = {}
for entry in data:
UpperCamelCase_: Optional[int] = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
UpperCamelCase_: Union[str, Any] = entity_id
break
UpperCamelCase_: Dict = F'''{language}:{entity_name}'''
UpperCamelCase_: Optional[int] = entity_id
return new_mapping
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
A_ : List[str] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 57 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_a = StableDiffusionInstructPixaPixPipeline
_a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
_a = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_a = IMAGE_TO_IMAGE_IMAGE_PARAMS
_a = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ :Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCamelCase__ :List[Any] = PNDMScheduler(skip_prk_steps=_lowerCamelCase )
torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase__ :Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCamelCase__ :int = CLIPTextModel(_lowerCamelCase )
UpperCamelCase__ :Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ :Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
'''simple docstring'''
UpperCamelCase__ :str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
UpperCamelCase__ :Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase__ :str = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' )
if str(_lowerCamelCase ).startswith('''mps''' ):
UpperCamelCase__ :List[Any] = torch.manual_seed(_lowerCamelCase )
else:
UpperCamelCase__ :Tuple = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
UpperCamelCase__ :Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :str = self.get_dummy_components()
UpperCamelCase__ :Dict = StableDiffusionInstructPixaPixPipeline(**_lowerCamelCase )
UpperCamelCase__ :int = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
UpperCamelCase__ :int = self.get_dummy_inputs(_lowerCamelCase )
UpperCamelCase__ :Union[str, Any] = sd_pipe(**_lowerCamelCase ).images
UpperCamelCase__ :Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ :Optional[int] = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :int = self.get_dummy_components()
UpperCamelCase__ :Dict = StableDiffusionInstructPixaPixPipeline(**_lowerCamelCase )
UpperCamelCase__ :List[Any] = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
UpperCamelCase__ :Any = self.get_dummy_inputs(_lowerCamelCase )
UpperCamelCase__ :Dict = 'french fries'
UpperCamelCase__ :Optional[int] = sd_pipe(**_lowerCamelCase , negative_prompt=_lowerCamelCase )
UpperCamelCase__ :List[str] = output.images
UpperCamelCase__ :List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ :Dict = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :Tuple = self.get_dummy_components()
UpperCamelCase__ :Any = StableDiffusionInstructPixaPixPipeline(**_lowerCamelCase )
UpperCamelCase__ :List[Any] = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
UpperCamelCase__ :Dict = self.get_dummy_inputs(_lowerCamelCase )
UpperCamelCase__ :List[Any] = [inputs['prompt']] * 2
UpperCamelCase__ :Optional[Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
UpperCamelCase__ :Any = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
UpperCamelCase__ :Tuple = image / 2 + 0.5
UpperCamelCase__ :int = image.permute(0 , 3 , 1 , 2 )
UpperCamelCase__ :int = image.repeat(2 , 1 , 1 , 1 )
UpperCamelCase__ :List[Any] = sd_pipe(**_lowerCamelCase ).images
UpperCamelCase__ :List[str] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
UpperCamelCase__ :Optional[int] = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :Any = self.get_dummy_components()
UpperCamelCase__ :str = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
UpperCamelCase__ :Dict = StableDiffusionInstructPixaPixPipeline(**_lowerCamelCase )
UpperCamelCase__ :str = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
UpperCamelCase__ :Union[str, Any] = self.get_dummy_inputs(_lowerCamelCase )
UpperCamelCase__ :Optional[int] = sd_pipe(**_lowerCamelCase ).images
UpperCamelCase__ :Optional[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ :Tuple = [round(_lowerCamelCase , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(_lowerCamelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ :Tuple = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = self.get_dummy_components()
UpperCamelCase__ :Optional[Any] = StableDiffusionInstructPixaPixPipeline(**_lowerCamelCase )
UpperCamelCase__ :str = VaeImageProcessor(do_resize=_lowerCamelCase , do_normalize=_lowerCamelCase )
UpperCamelCase__ :Optional[Any] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
UpperCamelCase__ :Dict = pipe(**self.get_dummy_inputs_by_type(_lowerCamelCase , input_image_type='''pt''' ) )[0]
UpperCamelCase__ :Union[str, Any] = components['vae']
UpperCamelCase__ :Union[str, Any] = self.get_dummy_inputs_by_type(_lowerCamelCase , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
UpperCamelCase__ :str = vae.encode(inputs[image_param] ).latent_dist.mode()
UpperCamelCase__ :Dict = pipe(**_lowerCamelCase )[0]
UpperCamelCase__ :str = np.abs(out - out_latents_inputs ).max()
self.assertLess(_lowerCamelCase , 1e-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self , UpperCamelCase_=0 ):
'''simple docstring'''
UpperCamelCase__ :int = torch.manual_seed(_lowerCamelCase )
UpperCamelCase__ :int = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
UpperCamelCase__ :Union[str, Any] = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
UpperCamelCase__ :List[str] = self.get_inputs()
UpperCamelCase__ :Optional[int] = pipe(**_lowerCamelCase ).images
UpperCamelCase__ :Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ :Optional[Any] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_lowerCamelCase )
UpperCamelCase__ :List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
UpperCamelCase__ :Union[str, Any] = self.get_inputs()
UpperCamelCase__ :Optional[Any] = pipe(**_lowerCamelCase ).images
UpperCamelCase__ :Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ :Any = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_lowerCamelCase )
UpperCamelCase__ :Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
UpperCamelCase__ :int = self.get_inputs()
UpperCamelCase__ :Any = pipe(**_lowerCamelCase ).images
UpperCamelCase__ :str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ :str = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = 0
def callback_fn(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> None:
UpperCamelCase__ :List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCamelCase__ :List[str] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCamelCase__ :Optional[Any] = latents[0, -3:, -3:, -1]
UpperCamelCase__ :Tuple = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
UpperCamelCase__ :str = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCamelCase__ :Optional[int] = latents[0, -3:, -3:, -1]
UpperCamelCase__ :Any = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_lowerCamelCase , torch_dtype=torch.floataa )
UpperCamelCase__ :Tuple = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
UpperCamelCase__ :Optional[Any] = self.get_inputs()
pipe(**_lowerCamelCase , callback=_lowerCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase__ :List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_lowerCamelCase , torch_dtype=torch.floataa )
UpperCamelCase__ :Optional[int] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCamelCase__ :Union[str, Any] = self.get_inputs()
UpperCamelCase__ :Optional[Any] = pipe(**_lowerCamelCase )
UpperCamelCase__ :str = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCamelCase__ :Dict = inputs['image'].resize((504, 504) )
UpperCamelCase__ :int = 'timbrooks/instruct-pix2pix'
UpperCamelCase__ :Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
_lowerCamelCase , safety_checker=_lowerCamelCase , )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
UpperCamelCase__ :Tuple = pipe(**_lowerCamelCase )
UpperCamelCase__ :Optional[int] = output.images[0]
UpperCamelCase__ :List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
UpperCamelCase__ :Union[str, Any] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3 | 189 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 57 | 0 |
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info: SplitInfo):
    # For backward compatibility, asdict() still exposes the deprecated dataset_name field
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 606 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 52 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57 | 0 |
"""simple docstring"""
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000), i.e. Project Euler problem 19."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901 was the first Sunday of the century
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
| 337 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import BaseOutput, is_torch_available, is_transformers_available


@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """Output of the semantic-guidance Stable Diffusion pipeline: the generated images and,
    per image, whether the safety checker flagged the content as NSFW."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 57 | 0 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
| 139 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
A_ : Tuple = logging.get_logger(__name__)
A_ : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
A_ : int = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def snake_case (UpperCAmelCase__ ) -> str:
UpperCamelCase_: Tuple = {}
with open(UpperCAmelCase__ , 'r' ) as file:
for line_number, line in enumerate(UpperCAmelCase__ ):
UpperCamelCase_: List[Any] = line.strip()
if line:
UpperCamelCase_: List[Any] = line.split()
UpperCamelCase_: Optional[Any] = line_number
UpperCamelCase_: Any = words[0]
UpperCamelCase_: List[Any] = value
return result
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
for attribute in key.split('.' ):
UpperCamelCase_: str = getattr(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCAmelCase__ ):
UpperCamelCase_: Any = PARAM_MAPPING[full_name.split('.' )[-1]]
UpperCamelCase_: Dict = 'param'
if weight_type is not None and weight_type != "param":
UpperCamelCase_: Optional[Any] = getattr(UpperCAmelCase__ , UpperCAmelCase__ ).shape
elif weight_type is not None and weight_type == "param":
UpperCamelCase_: Optional[Any] = hf_pointer
for attribute in hf_param_name.split('.' ):
UpperCamelCase_: str = getattr(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: Tuple = shape_pointer.shape
# let's reduce dimension
UpperCamelCase_: int = value[0]
else:
UpperCamelCase_: Union[str, Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCamelCase_: Optional[int] = value
elif weight_type == "weight_g":
UpperCamelCase_: Any = value
elif weight_type == "weight_v":
UpperCamelCase_: Union[str, Any] = value
elif weight_type == "bias":
UpperCamelCase_: Union[str, Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
UpperCamelCase_: Dict = getattr(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: Optional[Any] = value
else:
UpperCamelCase_: int = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Any:
UpperCamelCase_: Union[str, Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCAmelCase__ ):
UpperCamelCase_: Dict = PARAM_MAPPING[full_name.split('.' )[-1]]
UpperCamelCase_: List[Any] = 'param'
if weight_type is not None and weight_type != "param":
UpperCamelCase_: List[Any] = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCamelCase_: Any = '.'.join([key, hf_param_name] )
else:
UpperCamelCase_: Union[str, Any] = key
UpperCamelCase_: Any = value if 'lm_head' in full_key else value[0]
A_ : str = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None ) -> Any:
UpperCamelCase_: Optional[int] = False
for key, mapped_key in MAPPING.items():
UpperCamelCase_: Tuple = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCamelCase_: Optional[Any] = True
if "*" in mapped_key:
UpperCamelCase_: Optional[int] = name.split(UpperCAmelCase__ )[0].split('.' )[-2]
UpperCamelCase_: Any = mapped_key.replace('*' , UpperCAmelCase__ )
if "weight_g" in name:
UpperCamelCase_: Union[str, Any] = 'weight_g'
elif "weight_v" in name:
UpperCamelCase_: Dict = 'weight_v'
elif "bias" in name:
UpperCamelCase_: int = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase_: str = 'weight'
else:
UpperCamelCase_: Union[str, Any] = None
if hf_dict is not None:
rename_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
else:
set_recursively(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return is_used
return is_used
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
UpperCamelCase_: List[Any] = []
UpperCamelCase_: Dict = fairseq_model.state_dict()
UpperCamelCase_: Optional[Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase_: Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase_: List[Any] = True
else:
UpperCamelCase_: Tuple = load_wavaveca_layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if not is_used:
unused_weights.append(UpperCAmelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Any:
UpperCamelCase_: Any = full_name.split('conv_layers.' )[-1]
UpperCamelCase_: int = name.split('.' )
UpperCamelCase_: int = int(items[0] )
UpperCamelCase_: Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCamelCase_: Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCamelCase_: int = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCamelCase_: Union[str, Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCamelCase_: List[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase__ )
@torch.no_grad()
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=True , UpperCAmelCase__=False ) -> Dict:
if config_path is not None:
UpperCamelCase_: Tuple = WavaVecaConfig.from_pretrained(UpperCAmelCase__ )
else:
UpperCamelCase_: List[str] = WavaVecaConfig()
if is_seq_class:
UpperCamelCase_: int = read_txt_into_dict(UpperCAmelCase__ )
UpperCamelCase_: Tuple = idalabel
UpperCamelCase_: str = WavaVecaForSequenceClassification(UpperCAmelCase__ )
UpperCamelCase_: Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , )
feature_extractor.save_pretrained(UpperCAmelCase__ )
elif is_finetuned:
if dict_path:
UpperCamelCase_: List[Any] = Dictionary.load(UpperCAmelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase_: Dict = target_dict.pad_index
UpperCamelCase_: Tuple = target_dict.bos_index
UpperCamelCase_: Optional[Any] = target_dict.eos_index
UpperCamelCase_: Union[str, Any] = len(target_dict.symbols )
UpperCamelCase_: int = os.path.join(UpperCAmelCase__ , 'vocab.json' )
if not os.path.isdir(UpperCAmelCase__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCAmelCase__ ) )
return
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
UpperCamelCase_: str = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase_: List[str] = 0
UpperCamelCase_: List[Any] = 1
with open(UpperCAmelCase__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: Union[str, Any] = WavaVecaCTCTokenizer(
UpperCAmelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=UpperCAmelCase__ , )
UpperCamelCase_: Any = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase_: Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , )
UpperCamelCase_: Dict = WavaVecaProcessor(feature_extractor=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
processor.save_pretrained(UpperCAmelCase__ )
UpperCamelCase_: Any = WavaVecaForCTC(UpperCAmelCase__ )
else:
UpperCamelCase_: Any = WavaVecaForPreTraining(UpperCAmelCase__ )
if is_finetuned or is_seq_class:
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
UpperCamelCase_: List[str] = argparse.Namespace(task='audio_pretraining' )
UpperCamelCase_: Any = fairseq.tasks.setup_task(UpperCAmelCase__ )
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase__ )
UpperCamelCase_: str = model[0].eval()
recursively_load_weights(UpperCAmelCase__ , UpperCAmelCase__ , not is_finetuned )
hf_wavavec.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
A_ : int = parser.parse_args()
A_ : str = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
) | 57 | 0 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
snake_case_ : int = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
snake_case_ : List[Any] = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def lowercase_ ( ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["rouge2", "rougeL"] )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["rouge2"] )
assert (
pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean()
)
def lowercase_ ( ):
'''simple docstring'''
UpperCAmelCase : List[str] = 'rougeLsum'
UpperCAmelCase : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
UpperCAmelCase : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowercase_ ( ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = ['rouge1', 'rouge2', 'rougeL']
UpperCAmelCase : Union[str, Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
UpperCAmelCase : List[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
assert score_sep == score_no_sep
def lowercase_ ( ):
'''simple docstring'''
UpperCAmelCase : List[str] = [
'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
]
UpperCAmelCase : Union[str, Any] = [
'Margot Frank, died in 1945, a month earlier than previously thought.',
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
' the final seconds on board Flight 9525.',
]
assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )
def lowercase_ ( ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = [
'" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
]
UpperCAmelCase : Tuple = [
' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
]
UpperCAmelCase : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["rougeLsum"] , newline_sep=UpperCAmelCase__ )['rougeLsum']
UpperCAmelCase : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["rougeLsum"] )['rougeLsum']
assert new_score > prev_score
def lowercase_ ( ):
'''simple docstring'''
UpperCAmelCase : int = Path("examples/seq2seq/test_data/wmt_en_ro" )
UpperCAmelCase : Union[str, Any] = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase : str = calculate_rouge_path(
data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
| 595 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
UpperCamelCase_: Optional[int] = inspect.getfile(accelerate.test_utils )
UpperCamelCase_: Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
UpperCamelCase_: Tuple = test_metrics
@require_cpu
def _a ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def _a ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def _a ( self ):
self.test_metrics.main()
@require_multi_gpu
def _a ( self ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
UpperCamelCase_: List[Any] = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_lowerCamelCase , env=os.environ.copy() ) | 57 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class A ( UpperCAmelCase_ ):
'''simple docstring'''
def lowerCamelCase__ (self : int ) -> Tuple:
"""simple docstring"""
lowercase__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCamelCase , """tf_padding""" ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , """depth_multiplier""" ) )
class A :
'''simple docstring'''
def __init__(self : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]=13 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : Any=0.25 , _UpperCAmelCase : int=8 , _UpperCAmelCase : int=8 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : int=32 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str="relu6" , _UpperCAmelCase : str=1280 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : List[str]=10 , _UpperCAmelCase : Optional[int]=None , ) -> int:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = depth_multiplier
lowercase__ = depth_divisible_by
lowercase__ = min_depth
lowercase__ = expand_ratio
lowercase__ = tf_padding
lowercase__ = output_stride
lowercase__ = first_layer_is_expansion
lowercase__ = finegrained_output
lowercase__ = hidden_act
lowercase__ = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
lowercase__ = classifier_dropout_prob
lowercase__ = use_labels
lowercase__ = is_training
lowercase__ = num_labels
lowercase__ = initializer_range
lowercase__ = scope
def lowerCamelCase__ (self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase__ (self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase__ (self : int , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
lowercase__ = MobileNetVaModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowercase__ = model(_lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str ) -> str:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = MobileNetVaForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowercase__ = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = MobileNetVaForSemanticSegmentation(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowercase__ = model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowercase__ = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase__ (self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ = config_and_inputs
lowercase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
A__ = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
A__ = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A__ = False
A__ = False
A__ = False
A__ = False
def lowerCamelCase__ (self : List[str] ) -> Dict:
"""simple docstring"""
lowercase__ = MobileNetVaModelTester(self )
lowercase__ = MobileNetVaConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def lowerCamelCase__ (self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCamelCase__ (self : int ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCamelCase__ (self : str ) -> List[Any]:
"""simple docstring"""
pass
def lowerCamelCase__ (self : int ) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_lowerCamelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def lowerCamelCase__ (self : Dict ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def lowerCamelCase__ (self : List[Any] ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(_UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple ):
lowercase__ = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
lowercase__ = outputs.hidden_states
lowercase__ = 16
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCamelCase__ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def lowerCamelCase__ (self : Any ) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )
@slow
def lowerCamelCase__ (self : Tuple ) -> int:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = MobileNetVaModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
lowercase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase__ (self : str ) -> int:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCamelCase__ (self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(_lowerCamelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**_lowerCamelCase )
# verify the logits
lowercase__ = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
lowercase__ = torch.tensor([0.2_445, -1.1_993, 0.1_905] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) )
@slow
def lowerCamelCase__ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
lowercase__ = model.to(_lowerCamelCase )
lowercase__ = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
lowercase__ = prepare_img()
lowercase__ = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**_lowerCamelCase )
lowercase__ = outputs.logits
# verify the logits
lowercase__ = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , _lowerCamelCase )
lowercase__ = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1_595, -2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]],
[[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]],
] , device=_lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
| 15 |
import math
class _lowerCAmelCase:
"""simple docstring"""
def _a ( self , weights , sample ):
da: float = 0.0
db: float = 0.0
for i in range(len(sample ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
db += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > db else 1
def _a ( self , weights , sample , j , alpha ):
for i in range(len(weights[j] ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def snake_case () -> None:
# Training Examples ( m, n )
UpperCamelCase_: List[str] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
UpperCamelCase_: List[Any] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
UpperCamelCase_: Dict = SelfOrganizingMap()
UpperCamelCase_: List[Any] = 3
UpperCamelCase_: List[str] = 0.5
for _ in range(UpperCAmelCase__ ):
for j in range(len(UpperCAmelCase__ ) ):
# training sample
UpperCamelCase_: int = training_samples[j]
# Compute the winning vector
UpperCamelCase_: Tuple = self_organizing_map.get_winner(UpperCAmelCase__ , UpperCAmelCase__ )
# Update the winning vector
UpperCamelCase_: Union[str, Any] = self_organizing_map.update(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# classify test sample
UpperCamelCase_: Dict = [0, 0, 0, 1]
UpperCamelCase_: Union[str, Any] = self_organizing_map.get_winner(UpperCAmelCase__ , UpperCAmelCase__ )
# results
print(F'''Cluster that the test sample belongs to : {winner}''' )
print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main() | 57 | 0 |
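As an aside, the winner-takes-all step that the class above implements reduces to picking the weight row with the smallest squared Euclidean distance to the sample and nudging only that row toward the sample by the learning rate. A minimal standalone sketch (variable names and values here are illustrative, not taken from the row above):
sample = [0, 0, 1, 1]
weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
alpha = 0.5
# squared Euclidean distance from the sample to each weight row
distances = [sum((s - w) ** 2 for s, w in zip(sample, row)) for row in weights]
winner = distances.index(min(distances))
# only the winning row moves toward the sample
weights[winner] = [w + alpha * (s - w) for s, w in zip(sample, weights[winner])]
print(winner, weights[winner])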
from math import factorial
class _a :
'''simple docstring'''
def __init__( self , A__ , A__ ):
A__ : List[Any] = real
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A__ : Optional[int] = [1] * rank
else:
A__ : List[str] = rank
def __repr__( self ):
return (
F"""{self.real}+"""
F"""{'+'.join(str(_lowerCamelCase )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def __A ( self ):
A__ : Union[str, Any] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , _lowerCamelCase )
def __add__( self , A__ ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
return Dual(self.real + other , self.duals )
A__ : List[Any] = self.duals.copy()
A__ : Optional[int] = other.duals.copy()
if len(_lowerCamelCase ) > len(_lowerCamelCase ):
o_dual.extend([1] * (len(_lowerCamelCase ) - len(_lowerCamelCase )) )
elif len(_lowerCamelCase ) < len(_lowerCamelCase ):
s_dual.extend([1] * (len(_lowerCamelCase ) - len(_lowerCamelCase )) )
A__ : List[str] = []
for i in range(len(_lowerCamelCase ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , _lowerCamelCase )
UpperCAmelCase__: Union[str, Any] = __add__
def __sub__( self , A__ ):
return self + other * -1
def __mul__( self , A__ ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
A__ : Union[str, Any] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , _lowerCamelCase )
A__ : List[Any] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , _lowerCamelCase )
UpperCAmelCase__: Optional[int] = __mul__
def __truediv__( self , A__ ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
A__ : Tuple = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , _lowerCamelCase )
raise ValueError
def __floordiv__( self , A__ ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
A__ : Union[str, Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , _lowerCamelCase )
raise ValueError
def __pow__( self , A__ ):
if n < 0 or isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
A__ : List[str] = self
for _ in range(n - 1 ):
x *= self
return x
def UpperCamelCase (lowercase_: Optional[int] , lowercase_: Dict , lowercase_: Optional[Any] ) -> int:
if not callable(UpperCAmelCase__ ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(UpperCAmelCase__ , (float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError("""differentiate() requires an int as input for order""" )
A__ : Optional[int] = Dual(UpperCAmelCase__ , 1 )
A__ : Optional[Any] = func(UpperCAmelCase__ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(UpperCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def UpperCamelCase (lowercase_: str ) -> Optional[Any]:
return y**2 * y**4
print(differentiate(f, 9, 2))
| 456 |
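For orientation, the forward-mode trick behind the Dual class above: seed the dual (epsilon) coefficient of the input with 1, propagate it through the arithmetic, and read the derivative off the result. A first-order sketch with illustrative names, independent of the class in the row above:
def dual_mul(a, b):
    # (a0 + a1*e)(b0 + b1*e) = a0*b0 + (a0*b1 + a1*b0)*e, since e*e = 0
    return (a[0] * b[0], a[0] * b[1] + a[1] * b[0])

def derivative_of_cube(x):
    v = (x, 1.0)                      # seed the derivative slot with 1
    y = dual_mul(dual_mul(v, v), v)   # f(x) = x ** 3
    return y[1]

print(derivative_of_cube(2.0))        # 12.0, i.e. 3 * x**2 at x = 2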
from collections import namedtuple
A_ : Tuple = namedtuple('from_to', 'from_ to')
A_ : int = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.00378541, 264.172),
'cubicyard': from_to(0.76455, 1.30795),
'cubicfoot': from_to(0.028, 35.3147),
'cup': from_to(0.000236588, 4226.75),
}
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
+ ', '.join(UpperCAmelCase__ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ', '.join(UpperCAmelCase__ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod() | 57 | 0 |
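The conversion above pivots through cubic metres: multiply by the source unit's from_ factor to land in cubic metres, then by the target unit's to factor. A quick worked example using the table's own constants:
litres = 1000
cubic_metres = litres * 0.001        # 'litre' from_ factor
gallons = cubic_metres * 264.172     # 'gallon' to factor
print(gallons)                       # 264.172 US gallons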
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : Optional[int] = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class a_ ( UpperCAmelCase_ ):
a : Optional[Any] = '''ibert'''
def __init__( self : int , __UpperCamelCase : Optional[int]=3_05_22 , __UpperCamelCase : Optional[Any]=7_68 , __UpperCamelCase : Optional[int]=12 , __UpperCamelCase : Tuple=12 , __UpperCamelCase : Any=30_72 , __UpperCamelCase : Optional[int]="gelu" , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Union[str, Any]=0.1 , __UpperCamelCase : Tuple=5_12 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Tuple=0.0_2 , __UpperCamelCase : int=1e-12 , __UpperCamelCase : Any=1 , __UpperCamelCase : int=0 , __UpperCamelCase : Any=2 , __UpperCamelCase : List[Any]="absolute" , __UpperCamelCase : int=False , __UpperCamelCase : Optional[Any]="none" , **__UpperCamelCase : Dict , ) ->List[str]:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = quant_mode
_UpperCAmelCase = force_dequant
class a_ ( UpperCAmelCase_ ):
@property
def _snake_case ( self : int ) ->List[str]:
'''simple docstring'''
if self.task == "multiple-choice":
_UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] ) | 555 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
A_ : int = logging.get_logger(__name__)
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
warnings.warn(
'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use CLIPImageProcessor instead.' , _lowerCamelCase , )
super().__init__(*_lowerCamelCase , **_lowerCamelCase ) | 57 | 0 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : List[str] =ConsistencyModelPipeline
__lowerCAmelCase : Union[str, Any] =UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__lowerCAmelCase : List[str] =UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
__lowerCAmelCase : List[str] =frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
@property
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase =UNetaDModel.from_pretrained(
'diffusers/consistency-models-test', subfolder='test_unet', )
return unet
@property
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
_lowercase =UNetaDModel.from_pretrained(
'diffusers/consistency-models-test', subfolder='test_unet_class_cond', )
return unet
def UpperCamelCase__ ( self :int, snake_case :Tuple=False):
"""simple docstring"""
if class_cond:
_lowercase =self.dummy_cond_unet
else:
_lowercase =self.dummy_uncond_unet
# Default to CM multistep sampler
_lowercase =CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=80.0, )
_lowercase ={
'unet': unet,
'scheduler': scheduler,
}
return components
def UpperCamelCase__ ( self :Union[str, Any], snake_case :Optional[int], snake_case :Dict=0):
"""simple docstring"""
if str(_lowerCamelCase).startswith('mps'):
_lowercase =torch.manual_seed(_lowerCamelCase)
else:
_lowercase =torch.Generator(device=_lowerCamelCase).manual_seed(_lowerCamelCase)
_lowercase ={
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def UpperCamelCase__ ( self :str):
"""simple docstring"""
_lowercase ='cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase =self.get_dummy_components()
_lowercase =ConsistencyModelPipeline(**_lowerCamelCase)
_lowercase =pipe.to(_lowerCamelCase)
pipe.set_progress_bar_config(disable=_lowerCamelCase)
_lowercase =self.get_dummy_inputs(_lowerCamelCase)
_lowercase =pipe(**_lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase =image[0, -3:, -3:, -1]
_lowercase =np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
_lowercase ='cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase =self.get_dummy_components(class_cond=_lowerCamelCase)
_lowercase =ConsistencyModelPipeline(**_lowerCamelCase)
_lowercase =pipe.to(_lowerCamelCase)
pipe.set_progress_bar_config(disable=_lowerCamelCase)
_lowercase =self.get_dummy_inputs(_lowerCamelCase)
_lowercase =0
_lowercase =pipe(**_lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase =image[0, -3:, -3:, -1]
_lowercase =np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase ='cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase =self.get_dummy_components()
_lowercase =ConsistencyModelPipeline(**_lowerCamelCase)
_lowercase =pipe.to(_lowerCamelCase)
pipe.set_progress_bar_config(disable=_lowerCamelCase)
_lowercase =self.get_dummy_inputs(_lowerCamelCase)
_lowercase =1
_lowercase =None
_lowercase =pipe(**_lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase =image[0, -3:, -3:, -1]
_lowercase =np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def UpperCamelCase__ ( self :List[Any]):
"""simple docstring"""
_lowercase ='cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase =self.get_dummy_components(class_cond=_lowerCamelCase)
_lowercase =ConsistencyModelPipeline(**_lowerCamelCase)
_lowercase =pipe.to(_lowerCamelCase)
pipe.set_progress_bar_config(disable=_lowerCamelCase)
_lowercase =self.get_dummy_inputs(_lowerCamelCase)
_lowercase =1
_lowercase =None
_lowercase =0
_lowercase =pipe(**_lowerCamelCase).images
assert image.shape == (1, 32, 32, 3)
_lowercase =image[0, -3:, -3:, -1]
_lowercase =np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self :str):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self :Optional[int], snake_case :List[Any]=0, snake_case :List[str]=False, snake_case :Union[str, Any]="cpu", snake_case :Union[str, Any]=torch.floataa, snake_case :Any=(1, 3, 64, 64)):
"""simple docstring"""
_lowercase =torch.manual_seed(_lowerCamelCase)
_lowercase ={
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
_lowercase =self.get_fixed_latents(seed=_lowerCamelCase, device=_lowerCamelCase, dtype=_lowerCamelCase, shape=_lowerCamelCase)
_lowercase =latents
return inputs
def UpperCamelCase__ ( self :int, snake_case :Optional[int]=0, snake_case :Any="cpu", snake_case :Union[str, Any]=torch.floataa, snake_case :int=(1, 3, 64, 64)):
"""simple docstring"""
if type(_lowerCamelCase) == str:
_lowercase =torch.device(_lowerCamelCase)
_lowercase =torch.Generator(device=_lowerCamelCase).manual_seed(_lowerCamelCase)
_lowercase =randn_tensor(_lowerCamelCase, generator=_lowerCamelCase, device=_lowerCamelCase, dtype=_lowerCamelCase)
return latents
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase =UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase =CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=80.0, )
_lowercase =ConsistencyModelPipeline(unet=_lowerCamelCase, scheduler=_lowerCamelCase)
pipe.to(torch_device=_lowerCamelCase)
pipe.set_progress_bar_config(disable=_lowerCamelCase)
_lowercase =self.get_inputs()
_lowercase =pipe(**_lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase =image[0, -3:, -3:, -1]
_lowercase =np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def UpperCamelCase__ ( self :List[Any]):
"""simple docstring"""
_lowercase =UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase =CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=80.0, )
_lowercase =ConsistencyModelPipeline(unet=_lowerCamelCase, scheduler=_lowerCamelCase)
pipe.to(torch_device=_lowerCamelCase)
pipe.set_progress_bar_config(disable=_lowerCamelCase)
_lowercase =self.get_inputs()
_lowercase =1
_lowercase =None
_lowercase =pipe(**_lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase =image[0, -3:, -3:, -1]
_lowercase =np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
@require_torch_a
def UpperCamelCase__ ( self :Optional[Any]):
"""simple docstring"""
_lowercase =UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase =CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=80.0, )
_lowercase =ConsistencyModelPipeline(unet=_lowerCamelCase, scheduler=_lowerCamelCase)
pipe.to(torch_device=_lowerCamelCase, torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=_lowerCamelCase)
_lowercase =self.get_inputs(get_fixed_latents=_lowerCamelCase, device=_lowerCamelCase)
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=_lowerCamelCase, enable_math=_lowerCamelCase, enable_mem_efficient=_lowerCamelCase):
_lowercase =pipe(**_lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase =image[0, -3:, -3:, -1]
_lowercase =np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@require_torch_a
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
_lowercase =UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
_lowercase =CMStochasticIterativeScheduler(
num_train_timesteps=40, sigma_min=0.0_0_2, sigma_max=80.0, )
_lowercase =ConsistencyModelPipeline(unet=_lowerCamelCase, scheduler=_lowerCamelCase)
pipe.to(torch_device=_lowerCamelCase, torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=_lowerCamelCase)
_lowercase =self.get_inputs(get_fixed_latents=_lowerCamelCase, device=_lowerCamelCase)
_lowercase =1
_lowercase =None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=_lowerCamelCase, enable_math=_lowerCamelCase, enable_mem_efficient=_lowerCamelCase):
_lowercase =pipe(**_lowerCamelCase).images
assert image.shape == (1, 64, 64, 3)
_lowercase =image[0, -3:, -3:, -1]
_lowercase =np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 181 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
A_ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A_ : Optional[int] = 256
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : Union[str, Any] =['''melgan''']
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
super().__init__()
# From MELGAN
UpperCamelCase_: Any = math.log(1e-5 ) # Matches MelGAN training.
UpperCamelCase_: List[Any] = 4.0 # Largest value for most examples
UpperCamelCase_: Tuple = 1_2_8
self.register_modules(
notes_encoder=_lowerCamelCase , continuous_encoder=_lowerCamelCase , decoder=_lowerCamelCase , scheduler=_lowerCamelCase , melgan=_lowerCamelCase , )
def _a ( self , _lowerCamelCase , _lowerCamelCase=(-1.0, 1.0) , _lowerCamelCase=False ):
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = output_range
if clip:
UpperCamelCase_: int = torch.clip(_lowerCamelCase , self.min_value , self.max_value )
# Scale to [0, 1].
UpperCamelCase_: List[Any] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _a ( self , _lowerCamelCase , _lowerCamelCase=(-1.0, 1.0) , _lowerCamelCase=False ):
UpperCamelCase_ ,UpperCamelCase_: Dict = input_range
UpperCamelCase_: List[str] = torch.clip(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if clip else outputs
# Scale to [0, 1].
UpperCamelCase_: Optional[Any] = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: str = input_tokens > 0
UpperCamelCase_ ,UpperCamelCase_: Tuple = self.notes_encoder(
encoder_input_tokens=_lowerCamelCase , encoder_inputs_mask=_lowerCamelCase )
UpperCamelCase_ ,UpperCamelCase_: Any = self.continuous_encoder(
encoder_inputs=_lowerCamelCase , encoder_inputs_mask=_lowerCamelCase )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Dict = noise_time
if not torch.is_tensor(_lowerCamelCase ):
UpperCamelCase_: List[str] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(_lowerCamelCase ) and len(timesteps.shape ) == 0:
UpperCamelCase_: Dict = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCamelCase_: Union[str, Any] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
UpperCamelCase_: Any = self.decoder(
encodings_and_masks=_lowerCamelCase , decoder_input_tokens=_lowerCamelCase , decoder_noise_time=_lowerCamelCase )
return logits
@torch.no_grad()
def __call__( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = 1_0_0 , _lowerCamelCase = True , _lowerCamelCase = "numpy" , _lowerCamelCase = None , _lowerCamelCase = 1 , ):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowerCamelCase , _lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(_lowerCamelCase )}.''' )
UpperCamelCase_: List[str] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
UpperCamelCase_: str = np.zeros([1, 0, self.n_dims] , np.floataa )
UpperCamelCase_: Dict = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_lowerCamelCase , device=self.device )
for i, encoder_input_tokens in enumerate(_lowerCamelCase ):
if i == 0:
UpperCamelCase_: str = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
UpperCamelCase_: Tuple = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_lowerCamelCase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
UpperCamelCase_: Any = ones
UpperCamelCase_: str = self.scale_features(
_lowerCamelCase , output_range=[-1.0, 1.0] , clip=_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_lowerCamelCase , continuous_mask=_lowerCamelCase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
UpperCamelCase_: List[str] = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_lowerCamelCase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_lowerCamelCase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
UpperCamelCase_: int = self.decode(
encodings_and_masks=_lowerCamelCase , input_tokens=_lowerCamelCase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
UpperCamelCase_: Tuple = self.scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
UpperCamelCase_: List[Any] = self.scale_to_features(_lowerCamelCase , input_range=[-1.0, 1.0] )
UpperCamelCase_: Any = mel[:1]
UpperCamelCase_: List[str] = mel.cpu().float().numpy()
UpperCamelCase_: Tuple = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowerCamelCase , _lowerCamelCase )
logger.info('Generated segment' , _lowerCamelCase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' )
if output_type == "numpy":
UpperCamelCase_: int = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
UpperCamelCase_: int = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_lowerCamelCase ) | 57 | 0 |
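The scale_features / scale_to_features pair above is ordinary min-max scaling between the pipeline's mel range [log(1e-5), 4.0] and the model's working range (typically [-1.0, 1.0]). A small numpy sketch of the round trip, with names chosen here only for clarity:
import numpy as np

min_value, max_value = np.log(1e-5), 4.0  # same bounds as registered in __init__ above

def to_model_range(features, min_out=-1.0, max_out=1.0):
    zero_one = (features - min_value) / (max_value - min_value)
    return zero_one * (max_out - min_out) + min_out

def to_mel_range(outputs, min_in=-1.0, max_in=1.0):
    zero_one = (outputs - min_in) / (max_in - min_in)
    return zero_one * (max_value - min_value) + min_value

mel = np.array([np.log(1e-5), 0.0, 4.0])
print(to_mel_range(to_model_range(mel)))  # recovers the original mel values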
'''simple docstring'''
import pytest
lowercase__ : Tuple = '__dummy_dataset1__'
lowercase__ : Union[str, Any] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def __lowerCamelCase ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __lowerCamelCase ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : str ):
'''simple docstring'''
UpperCAmelCase_ = dataset_loading_script_name
UpperCAmelCase_ = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=UpperCAmelCase__ )
UpperCAmelCase_ = script_dir / F"""{script_name}.py"""
with open(UpperCAmelCase__ , '''w''' ) as f:
f.write(UpperCAmelCase__ )
return str(UpperCAmelCase__ )
| 390 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
A_ : Union[str, Any] = datasets.utils.logging.get_logger(__name__)
A_ : Optional[Any] = ['names', 'prefix']
A_ : List[str] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
A_ : List[Any] = ['encoding_errors', 'on_bad_lines']
A_ : Optional[Any] = ['date_format']
@dataclass
class _lowerCAmelCase( datasets.BuilderConfig ):
"""simple docstring"""
a : str =","
a : Optional[str] =None
a : Optional[Union[int, List[int], str]] ="infer"
a : Optional[List[str]] =None
a : Optional[List[str]] =None
a : Optional[Union[int, str, List[int], List[str]]] =None
a : Optional[Union[List[int], List[str]]] =None
a : Optional[str] =None
a : bool =True
a : Optional[Literal["c", "python", "pyarrow"]] =None
a : Dict[Union[int, str], Callable[[Any], Any]] =None
a : Optional[list] =None
a : Optional[list] =None
a : bool =False
a : Optional[Union[int, List[int]]] =None
a : Optional[int] =None
a : Optional[Union[str, List[str]]] =None
a : bool =True
a : bool =True
a : bool =False
a : bool =True
a : Optional[str] =None
a : str ="."
a : Optional[str] =None
a : str ='"'
a : int =0
a : Optional[str] =None
a : Optional[str] =None
a : Optional[str] =None
a : Optional[str] =None
a : bool =True
a : bool =True
a : int =0
a : bool =True
a : bool =False
a : Optional[str] =None
a : int =10000
a : Optional[datasets.Features] =None
a : Optional[str] ="strict"
a : Literal["error", "warn", "skip"] ="error"
a : Optional[str] =None
def _a ( self ):
if self.delimiter is not None:
UpperCamelCase_: Optional[Any] = self.delimiter
if self.column_names is not None:
UpperCamelCase_: int = self.column_names
@property
def _a ( self ):
UpperCamelCase_: Any = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , _lowerCamelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class _lowerCAmelCase( datasets.ArrowBasedBuilder ):
"""simple docstring"""
a : Dict =CsvConfig
def _a ( self ):
return datasets.DatasetInfo(features=self.config.features )
def _a ( self , _lowerCamelCase ):
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
UpperCamelCase_: Tuple = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCamelCase , (str, list, tuple) ):
UpperCamelCase_: List[Any] = data_files
if isinstance(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: str = [files]
UpperCamelCase_: Tuple = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
UpperCamelCase_: Tuple = []
for split_name, files in data_files.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Dict = [files]
UpperCamelCase_: int = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'files': files} ) )
return splits
def _a ( self , _lowerCamelCase ):
if self.config.features is not None:
UpperCamelCase_: List[Any] = self.config.features.arrow_schema
if all(not require_storage_cast(_lowerCamelCase ) for feature in self.config.features.values() ):
# cheaper cast
UpperCamelCase_: Optional[int] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=_lowerCamelCase )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
UpperCamelCase_: int = table_cast(_lowerCamelCase , _lowerCamelCase )
return pa_table
def _a ( self , _lowerCamelCase ):
UpperCamelCase_: List[str] = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
UpperCamelCase_: Dict = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(_lowerCamelCase ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ):
UpperCamelCase_: Optional[Any] = pd.read_csv(_lowerCamelCase , iterator=_lowerCamelCase , dtype=_lowerCamelCase , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(_lowerCamelCase ):
UpperCamelCase_: Union[str, Any] = pa.Table.from_pandas(_lowerCamelCase )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase )
except ValueError as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}''' )
raise | 57 | 0 |
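For reference, this ArrowBasedBuilder is what runs when CSV files are loaded through the datasets library; a hedged usage sketch (the file paths below are placeholders, and the extra keyword arguments are forwarded to pandas.read_csv through the CsvConfig fields listed above):
from datasets import load_dataset

dataset = load_dataset(
    "csv",
    data_files={"train": "train.csv", "validation": "valid.csv"},  # placeholder paths
    sep=",",
    skiprows=0,
)
print(dataset)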
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__snake_case = random.Random()
if is_torch_available():
import torch
def a ( __a , __a=1.0 , __a=None , __a=None ) -> Any:
'''simple docstring'''
if rng is None:
UpperCamelCase__ :str = global_rng
UpperCamelCase__ :Optional[int] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=400 , UpperCamelCase_=2000 , UpperCamelCase_=1 , UpperCamelCase_=0.0 , UpperCamelCase_=16000 , UpperCamelCase_=True , UpperCamelCase_=True , ):
'''simple docstring'''
UpperCamelCase__ :Tuple = parent
UpperCamelCase__ :int = batch_size
UpperCamelCase__ :int = min_seq_length
UpperCamelCase__ :int = max_seq_length
UpperCamelCase__ :List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ :Dict = feature_size
UpperCamelCase__ :Optional[Any] = padding_value
UpperCamelCase__ :List[Any] = sampling_rate
UpperCamelCase__ :Optional[int] = return_attention_mask
UpperCamelCase__ :Optional[Any] = do_normalize
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCAmelCase__ ( self , UpperCamelCase_=False , UpperCamelCase_=False ):
'''simple docstring'''
def _flatten(UpperCamelCase_ ):
return list(itertools.chain(*_lowerCamelCase ) )
if equal_length:
UpperCamelCase__ :List[str] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase__ :str = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCamelCase__ :Optional[int] = [np.asarray(_lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4)) | 189 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : Union[str, Any] = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : Tuple ='''open-llama'''
    def __init__(self, vocab_size=100000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_scaling=None, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") | 57 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=4 , ) -> Any:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class __UpperCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
lowercase = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained("albert-base-v2" )
UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCamelCase )
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = FlaxAlbertModel.from_pretrained("albert-base-v2" )
UpperCamelCase = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
UpperCamelCase = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
UpperCamelCase = (1, 11, 768)
self.assertEqual(output.shape , _lowerCamelCase )
UpperCamelCase = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1e-4 ) )
| 606 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=3_2 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=1_6 , _lowerCamelCase=[3_2, 6_4, 1_2_8] , _lowerCamelCase=[1, 2, 1] , _lowerCamelCase=[2, 2, 4] , _lowerCamelCase=2 , _lowerCamelCase=2.0 , _lowerCamelCase=True , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-5 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=1_0 , _lowerCamelCase=8 , _lowerCamelCase=["stage1", "stage2"] , _lowerCamelCase=[1, 2] , ):
UpperCamelCase_: Tuple = parent
UpperCamelCase_: Dict = batch_size
UpperCamelCase_: List[str] = image_size
UpperCamelCase_: Tuple = patch_size
UpperCamelCase_: Tuple = num_channels
UpperCamelCase_: Dict = embed_dim
UpperCamelCase_: List[Any] = hidden_sizes
UpperCamelCase_: List[str] = depths
UpperCamelCase_: List[str] = num_heads
UpperCamelCase_: Optional[int] = window_size
UpperCamelCase_: Tuple = mlp_ratio
UpperCamelCase_: Dict = qkv_bias
UpperCamelCase_: str = hidden_dropout_prob
UpperCamelCase_: Optional[Any] = attention_probs_dropout_prob
UpperCamelCase_: int = drop_path_rate
UpperCamelCase_: Dict = hidden_act
UpperCamelCase_: List[str] = use_absolute_embeddings
UpperCamelCase_: Dict = patch_norm
UpperCamelCase_: Optional[Any] = layer_norm_eps
UpperCamelCase_: List[str] = initializer_range
UpperCamelCase_: List[Any] = is_training
UpperCamelCase_: Optional[int] = scope
UpperCamelCase_: str = use_labels
UpperCamelCase_: List[str] = type_sequence_label_size
UpperCamelCase_: Union[str, Any] = encoder_stride
UpperCamelCase_: Dict = out_features
UpperCamelCase_: str = out_indices
def _a ( self ):
UpperCamelCase_: int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_: List[Any] = None
if self.use_labels:
UpperCamelCase_: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_: Optional[Any] = self.get_config()
return config, pixel_values, labels
def _a ( self ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Optional[int] = FocalNetModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: int = model(_lowerCamelCase )
UpperCamelCase_: int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCamelCase_: int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: List[str] = FocalNetBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Optional[int] = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCamelCase_: int = None
UpperCamelCase_: List[Any] = FocalNetBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Any = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Tuple = FocalNetForMaskedImageModeling(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Any = model(_lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase_: List[Any] = 1
UpperCamelCase_: Dict = FocalNetForMaskedImageModeling(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_: Union[str, Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Tuple = self.type_sequence_label_size
UpperCamelCase_: List[Any] = FocalNetForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: str = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase_: Union[str, Any] = 1
UpperCamelCase_: Dict = FocalNetForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_: Union[str, Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] =(
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
a : Any =(
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
a : Dict =False
a : Union[str, Any] =False
a : Tuple =False
a : Optional[int] =False
a : Union[str, Any] =False
def _a ( self ):
UpperCamelCase_: str = FocalNetModelTester(self )
UpperCamelCase_: Tuple = ConfigTester(self , config_class=_lowerCamelCase , embed_dim=3_7 , has_text_modality=_lowerCamelCase )
def _a ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self ):
return
def _a ( self ):
UpperCamelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self ):
UpperCamelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
def _a ( self ):
UpperCamelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase )
def _a ( self ):
UpperCamelCase_: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def _a ( self ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def _a ( self ):
pass
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase_: Union[str, Any] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_: List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase_: List[Any] = model_class(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_: Any = [*signature.parameters.keys()]
UpperCamelCase_: List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Tuple = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_: Tuple = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
UpperCamelCase_: Union[str, Any] = outputs.hidden_states
UpperCamelCase_: Tuple = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# FocalNet has a different seq_length
UpperCamelCase_: Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase_: int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCamelCase_: Dict = outputs.reshaped_hidden_states
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: int = reshaped_hidden_states[0].shape
UpperCamelCase_: List[str] = (
reshaped_hidden_states[0].view(_lowerCamelCase , _lowerCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_: int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCamelCase_: int = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_: Optional[Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_: str = 3
UpperCamelCase_: Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCamelCase_: int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase_: List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCamelCase_: str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCamelCase_: Dict = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_: Optional[Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
@slow
def _a ( self ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_: List[Any] = FocalNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_: Dict = _config_zero_init(_lowerCamelCase )
for model_class in self.all_model_classes:
UpperCamelCase_: List[str] = model_class(config=_lowerCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def _a ( self ):
UpperCamelCase_: Optional[int] = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = self.default_image_processor
UpperCamelCase_: str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
UpperCamelCase_: str = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_: List[str] = model(**_lowerCamelCase )
# verify the logits
UpperCamelCase_: Optional[int] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
UpperCamelCase_: Optional[int] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 2_8_1 )
@require_torch
class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] =(FocalNetBackbone,) if is_torch_available() else ()
a : List[str] =FocalNetConfig
a : List[str] =False
def _a ( self ):
UpperCamelCase_: Any = FocalNetModelTester(self ) | 57 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
'''simple docstring'''
__lowerCAmelCase = XGLMConfig
__lowerCAmelCase = {}
__lowerCAmelCase = '''gelu'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=14 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.0_2 , ):
__a : Dict = parent
__a : str = batch_size
__a : List[str] = seq_length
__a : Tuple = is_training
__a : Optional[Any] = use_input_mask
__a : Any = use_labels
__a : List[Any] = vocab_size
__a : Optional[Any] = d_model
__a : List[str] = num_hidden_layers
__a : List[str] = num_attention_heads
__a : List[str] = ffn_dim
__a : str = activation_function
__a : Union[str, Any] = activation_dropout
__a : str = attention_dropout
__a : str = max_position_embeddings
__a : List[str] = initializer_range
__a : str = None
__a : List[Any] = 0
__a : str = 2
__a : Tuple = 1
def _lowerCamelCase ( self ):
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def _lowerCamelCase ( self ):
__a : List[Any] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
__a : Dict = None
if self.use_input_mask:
__a : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__a : Optional[int] = self.get_config()
__a : Union[str, Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowerCamelCase ( self ):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=_lowerCamelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=_lowerCamelCase , )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (config, input_ids, input_mask, head_mask) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__lowerCAmelCase = (TFXGLMForCausalLM,) if is_tf_available() else ()
__lowerCAmelCase = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def _lowerCamelCase ( self ):
__a : Dict = TFXGLMModelTester(self )
__a : Any = ConfigTester(self , config_class=_lowerCamelCase , n_embd=37 )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
@slow
def _lowerCamelCase ( self ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Union[str, Any] = TFXGLMModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def _lowerCamelCase ( self ):
super().test_resize_token_embeddings()
@require_tf
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self , _UpperCAmelCase=True ):
__a : Optional[Any] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
__a : int = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__a : List[Any] = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
__a : int = model.generate(_lowerCamelCase , do_sample=_lowerCamelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , _lowerCamelCase )
@slow
def _lowerCamelCase ( self ):
__a : str = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
__a : int = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
__a : str = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
__a : int = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
__a : str = model.generate(_lowerCamelCase , do_sample=_lowerCamelCase , seed=[7, 0] )
__a : Tuple = tokenizer.decode(output_ids[0] , skip_special_tokens=_lowerCamelCase )
__a : Dict = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
@slow
def _lowerCamelCase ( self ):
__a : List[Any] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
__a : List[Any] = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
    tokenizer.padding_side = "left"
# use different length sentences to test batching
__a : str = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__a : List[str] = tokenizer(_lowerCamelCase , return_tensors='''tf''' , padding=_lowerCamelCase )
__a : Optional[Any] = inputs['input_ids']
__a : Optional[Any] = model.generate(input_ids=_lowerCamelCase , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
__a : Dict = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
__a : int = model.generate(input_ids=_lowerCamelCase , max_new_tokens=12 )
__a : Tuple = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
__a : str = model.generate(input_ids=_lowerCamelCase , max_new_tokens=12 )
__a : Tuple = tokenizer.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
__a : Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_lowerCamelCase )
__a : int = tokenizer.decode(output_padded[0] , skip_special_tokens=_lowerCamelCase )
__a : Dict = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertListEqual(_lowerCamelCase , [non_padded_sentence, padded_sentence] ) | 52 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
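# Import structure consumed by the _LazyModule set up at the bottom of this file: the heavy
# torch / TensorFlow / Flax submodules are only imported when one of their symbols is accessed.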
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 57 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
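# Fairseq bookkeeping tensors that have no counterpart in the converted HF checkpoint
# (version markers, positional-embedding float buffers, the tied output projection).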
def remove_ignore_keys_(state_dict):
    ignore_keys = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
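# The sharding helper below writes each expert rank checkpoint out as its own shard after renaming
# its keys, appends the shared (non-expert) weights as the final shard, and then builds the
# weight-map index that ties the shards together.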
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )
# Add the last block
A__ = os.path.join(UpperCAmelCase__ , weights_name.replace(".bin" , f"""-{len(UpperCAmelCase__ )+1:05d}-of-???.bin""" ) )
A__ = torch.load(switch_checkpoint_path + "-shared.pt" )['model']
remove_ignore_keys_(UpperCAmelCase__ )
A__ = rename_fairseq_keys(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = shared_weights['decoder.embed_tokens.weight']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(UpperCAmelCase__ ) == 1:
A__ = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
torch.save(UpperCAmelCase__ , UpperCAmelCase__ )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(UpperCAmelCase__ , UpperCAmelCase__ )
# Otherwise, let's build the index
A__ = {}
for idx, shard in enumerate(UpperCAmelCase__ ):
A__ = weights_name.replace(".bin" , f"""-{idx+1:05d}-of-{len(UpperCAmelCase__ ):05d}.bin""" )
A__ = os.path.join(UpperCAmelCase__ , weights_name.replace(".bin" , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) )
for key in shard:
A__ = shard_file
# Add the metadata
A__ = {'total_size': total_size}
A__ = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , "w" , encoding="utf-8" ) as f:
A__ = json.dumps(UpperCAmelCase__ , indent=2 , sort_keys=UpperCAmelCase__ ) + '\n'
f.write(UpperCAmelCase__ )
return metadata, index
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
__UpperCAmelCase =parser.parse_args()
__UpperCAmelCase =shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__UpperCAmelCase =NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__UpperCAmelCase =NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path) | 337 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def snake_case () -> List[str]:
PartialState().wait_for_everyone()
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict:
if PartialState().distributed_type == DistributedType.TPU:
xm.save(UpperCAmelCase__ , UpperCAmelCase__ )
elif PartialState().local_process_index == 0:
torch.save(UpperCAmelCase__ , UpperCAmelCase__ )
@contextmanager
def snake_case (**UpperCAmelCase__ ) -> Any:
for key, value in kwargs.items():
UpperCamelCase_: int = str(UpperCAmelCase__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
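# The helper below builds a readable name for an object: it prefers __qualname__ / __name__ and
# otherwise falls back to the object's class, then to str().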
def snake_case (UpperCAmelCase__ ) -> str:
if not hasattr(UpperCAmelCase__ , '__qualname__' ) and not hasattr(UpperCAmelCase__ , '__name__' ):
UpperCamelCase_: List[Any] = getattr(UpperCAmelCase__ , '__class__' , UpperCAmelCase__ )
if hasattr(UpperCAmelCase__ , '__qualname__' ):
return obj.__qualname__
if hasattr(UpperCAmelCase__ , '__name__' ):
return obj.__name__
return str(UpperCAmelCase__ )
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
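# If no port is given, 29500 (the default torch.distributed master port) is checked;
# connect_ex() returning 0 means something is already listening on the port.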
def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0 | 57 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 139 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
A_ : Optional[Any] = data_utils.TransfoXLTokenizer
A_ : Union[str, Any] = data_utils.TransfoXLCorpus
A_ : Any = data_utils
A_ : Optional[Any] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(UpperCAmelCase__ , 'rb' ) as fp:
UpperCamelCase_: Union[str, Any] = pickle.load(UpperCAmelCase__ , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
UpperCamelCase_: Any = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
UpperCamelCase_: Union[str, Any] = corpus.vocab.__dict__
torch.save(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: str = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , UpperCAmelCase__ )
UpperCamelCase_: str = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(UpperCAmelCase__ , UpperCAmelCase__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
UpperCamelCase_: Any = os.path.abspath(UpperCAmelCase__ )
UpperCamelCase_: Dict = os.path.abspath(UpperCAmelCase__ )
print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
UpperCamelCase_: List[str] = TransfoXLConfig()
else:
UpperCamelCase_: Optional[int] = TransfoXLConfig.from_json_file(UpperCAmelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: Union[str, Any] = TransfoXLLMHeadModel(UpperCAmelCase__ )
UpperCamelCase_: Tuple = load_tf_weights_in_transfo_xl(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Save pytorch-model
UpperCamelCase_: str = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: Union[str, Any] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
print(F'''Save PyTorch model to {os.path.abspath(UpperCAmelCase__ )}''' )
torch.save(model.state_dict() , UpperCAmelCase__ )
print(F'''Save configuration file to {os.path.abspath(UpperCAmelCase__ )}''' )
with open(UpperCAmelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A_ : int = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
A_ : Tuple = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 57 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 595 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : List[str] = logging.get_logger(__name__)
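# Pairwise squared Euclidean distances between the rows of `a` and the rows of `b`, computed via
# the expansion ||a - b||^2 = ||a||^2 - 2 * a.b + ||b||^2.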
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
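# Assigns every RGB pixel to the index of its nearest colour cluster; these indices become the
# `input_ids` returned by the image processor below.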
def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
"""simple docstring"""
a : Any =['''pixel_values''']
def __init__( self , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = True , _lowerCamelCase = True , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase )
UpperCamelCase_: List[str] = size if size is not None else {'height': 2_5_6, 'width': 2_5_6}
UpperCamelCase_: str = get_size_dict(_lowerCamelCase )
UpperCamelCase_: Any = np.array(_lowerCamelCase ) if clusters is not None else None
UpperCamelCase_: Optional[int] = do_resize
UpperCamelCase_: List[Any] = size
UpperCamelCase_: Optional[int] = resample
UpperCamelCase_: str = do_normalize
UpperCamelCase_: str = do_color_quantize
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = None , **_lowerCamelCase , ):
UpperCamelCase_: Any = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
_lowerCamelCase , size=(size['height'], size['width']) , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase = None , ):
UpperCamelCase_: Optional[Any] = rescale(image=_lowerCamelCase , scale=1 / 1_2_7.5 , data_format=_lowerCamelCase )
UpperCamelCase_: Optional[Any] = image - 1
return image
def _a ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
UpperCamelCase_: Optional[Any] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_: Tuple = size if size is not None else self.size
UpperCamelCase_: Union[str, Any] = get_size_dict(_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = resample if resample is not None else self.resample
UpperCamelCase_: Any = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_: str = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
UpperCamelCase_: Dict = clusters if clusters is not None else self.clusters
UpperCamelCase_: Dict = np.array(_lowerCamelCase )
UpperCamelCase_: Optional[int] = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
UpperCamelCase_: Union[str, Any] = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
UpperCamelCase_: Union[str, Any] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_normalize:
UpperCamelCase_: Optional[Any] = [self.normalize(image=_lowerCamelCase ) for image in images]
if do_color_quantize:
UpperCamelCase_: Any = [to_channel_dimension_format(_lowerCamelCase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
UpperCamelCase_: Optional[Any] = np.array(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = color_quantize(_lowerCamelCase , _lowerCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
UpperCamelCase_: Dict = images.shape[0]
UpperCamelCase_: Any = images.reshape(_lowerCamelCase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
UpperCamelCase_: List[Any] = list(_lowerCamelCase )
else:
UpperCamelCase_: int = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
UpperCamelCase_: str = {'input_ids': images}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase ) | 57 | 0 |
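# A minimal, self-contained sketch of the nearest-centroid step that the
# do_color_quantize branch above relies on: each RGB pixel is mapped to the index of
# its closest colour cluster before the result is flattened into input_ids. The helper
# name and the toy data below are illustrative assumptions, not part of the processor.
import numpy as np

def nearest_cluster_ids(pixels, clusters):
    # pixels: (height, width, 3) RGB values; clusters: (n_clusters, 3) centroids
    flat = pixels.reshape(-1, 3).astype(np.float32)
    # squared Euclidean distance from every pixel to every centroid
    distances = ((flat[:, None, :] - clusters[None, :, :]) ** 2).sum(axis=-1)
    return distances.argmin(axis=1)  # (height * width,) cluster indices

image = np.array([[[255, 0, 0], [0, 255, 0]], [[0, 0, 255], [250, 5, 5]]])
centroids = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]], dtype=np.float32)
print(nearest_cluster_ids(image, centroids))  # [0 1 2 0]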
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A ( OnnxPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class A ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCamelCase__ (self : List[Any] ) -> int:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase__ (self : str ) -> Any:
"""simple docstring"""
lowercase__ = ort.SessionOptions()
lowercase__ = False
return options
def lowerCamelCase__ (self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
lowercase__ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase__ = 'A red cat sitting on a park bench'
lowercase__ = np.random.RandomState(0 )
lowercase__ = pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCamelCase , output_type="""np""" , )
lowercase__ = output.images
lowercase__ = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ (self : int ) -> int:
"""simple docstring"""
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
lowercase__ = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
lowercase__ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowercase__ = 'A red cat sitting on a park bench'
lowercase__ = np.random.RandomState(0 )
lowercase__ = pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowerCamelCase , output_type="""np""" , )
lowercase__ = output.images
lowercase__ = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 15 |
import numpy
# List of input, output pairs
A_ : Any = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
A_ : List[Any] = (((515, 22, 13), 555), ((61, 35, 49), 150))
A_ : Any = [2, 4, 1, 5]
A_ : List[Any] = len(train_data)
A_ : List[Any] = 0.009
def snake_case (UpperCAmelCase__ , UpperCAmelCase__="train" ) -> Optional[int]:
return calculate_hypothesis_value(UpperCAmelCase__ , UpperCAmelCase__ ) - output(
UpperCAmelCase__ , UpperCAmelCase__ )
def snake_case (UpperCAmelCase__ ) -> Optional[Any]:
UpperCamelCase_: Optional[Any] = 0
for i in range(len(UpperCAmelCase__ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> List[Any]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def snake_case (UpperCAmelCase__ , UpperCAmelCase__=m ) -> Optional[Any]:
UpperCamelCase_: Any = 0
for i in range(UpperCAmelCase__ ):
if index == -1:
summation_value += _error(UpperCAmelCase__ )
else:
summation_value += _error(UpperCAmelCase__ ) * train_data[i][0][index]
return summation_value
def snake_case (UpperCAmelCase__ ) -> Optional[Any]:
UpperCamelCase_: Optional[int] = summation_of_cost_derivative(UpperCAmelCase__ , UpperCAmelCase__ ) / m
return cost_derivative_value
def snake_case () -> Union[str, Any]:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
UpperCamelCase_: str = 0.00_0002
UpperCamelCase_: Any = 0
UpperCamelCase_: int = 0
while True:
j += 1
UpperCamelCase_: int = [0, 0, 0, 0]
for i in range(0 , len(UpperCAmelCase__ ) ):
UpperCamelCase_: Any = get_cost_derivative(i - 1 )
UpperCamelCase_: Optional[int] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
UpperCAmelCase__ , UpperCAmelCase__ , atol=UpperCAmelCase__ , rtol=UpperCAmelCase__ , ):
break
UpperCamelCase_: Optional[int] = temp_parameter_vector
print(('Number of iterations:', j) )
def snake_case () -> int:
for i in range(len(UpperCAmelCase__ ) ):
print(('Actual output value:', output(UpperCAmelCase__ , 'test' )) )
print(('Hypothesis output:', calculate_hypothesis_value(UpperCAmelCase__ , 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent() | 57 | 0 |
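# A compact vectorized restatement of the batch gradient-descent update used in the
# row above: theta <- theta - lr * (1/m) * X^T (X theta - y). The variable names and
# the fixed iteration cap are illustrative assumptions; the training pairs, learning
# rate and tolerance follow the constants defined in that row.
import numpy as np

X = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1], [11, 12, 13]], dtype=float)
y = np.array([15, 25, 41, 8, 41], dtype=float)
Xb = np.hstack([np.ones((X.shape[0], 1)), X])   # prepend a bias column for theta[0]
theta = np.zeros(Xb.shape[1])
lr = 0.009

for step in range(100_000):
    grad = Xb.T @ (Xb @ theta - y) / len(y)     # (1/m) * X^T (X theta - y)
    new_theta = theta - lr * grad
    if np.allclose(theta, new_theta, atol=2e-6, rtol=0):
        break
    theta = new_theta
print(step, theta)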
from __future__ import annotations
def UpperCamelCase (lowercase_: Any , lowercase_: int , lowercase_: Optional[int] , lowercase_: Dict ) -> Optional[int]: # noqa: E741
while r - l > 1:
A__ : str = (l + r) // 2
if v[m] >= key:
A__ : Any = m
else:
A__ : int = m # noqa: E741
return r
def UpperCamelCase (lowercase_: str ) -> int:
if len(UpperCAmelCase__ ) == 0:
return 0
A__ : Optional[Any] = [0] * len(UpperCAmelCase__ )
A__ : Optional[int] = 1
A__ : Any = v[0]
for i in range(1 , len(UpperCAmelCase__ ) ):
if v[i] < tail[0]:
A__ : Optional[int] = v[i]
elif v[i] > tail[length - 1]:
A__ : Any = v[i]
length += 1
else:
A__ : Any = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 456 |
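# The row above implements the O(n log n) "tails" / binary-search form of longest
# increasing subsequence. A self-contained restatement with the standard library
# (the names and sample input here are illustrative, not taken from the row):
from bisect import bisect_left

def lis_length(seq):
    tails = []                        # tails[k] = smallest tail of an increasing run of length k + 1
    for x in seq:
        i = bisect_left(tails, x)     # first position whose tail is >= x
        if i == len(tails):
            tails.append(x)           # x extends the longest subsequence seen so far
        else:
            tails[i] = x              # x lowers the tail for length i + 1
    return len(tails)

print(lis_length([10, 9, 2, 5, 3, 7, 101, 18]))  # 4, e.g. [2, 3, 7, 18]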
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline | 57 | 0 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase : Union[str, Any] = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase__ ( cls ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = TOKEN
HfFolder.save_token(_lowercase )
@classmethod
def UpperCAmelCase__ ( cls ) -> int:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
snake_case_ : Tuple = BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowercase , repo_id="""test-config""" , push_to_hub=_lowercase , use_auth_token=self._token )
snake_case_ : Optional[Any] = BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
snake_case_ : Union[str, Any] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_lowercase , repo_id="""valid_org/test-config-org""" , push_to_hub=_lowercase , use_auth_token=self._token )
snake_case_ : Tuple = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
CustomConfig.register_for_auto_class()
snake_case_ : List[str] = CustomConfig(attribute=4_2 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
snake_case_ : Dict = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config' , trust_remote_code=_lowercase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 4_2 )
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
snake_case_ : Union[str, Any] = c.n_embd + 1 # int
snake_case_ : Any = c.resid_pdrop + 1.0 # float
snake_case_ : Optional[Any] = not c.scale_attn_weights # bool
snake_case_ : Tuple = c.summary_type + """foo""" # str
c.update_from_string(
f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
self.assertEqual(_lowercase , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(_lowercase , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(_lowercase , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(_lowercase , c.summary_type , """mismatch for key: summary_type""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = PretrainedConfig()
snake_case_ : List[str] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
_lowercase , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
snake_case_ : int = [key for key, value in config_common_kwargs.items() if value == getattr(_lowercase , _lowercase )]
if len(_lowercase ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
f' {", ".join(_lowercase )}.' )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaises(_lowercase ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case_ : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
snake_case_ : Optional[int] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(_lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = mock.Mock()
snake_case_ : str = 5_0_0
snake_case_ : Any = {}
snake_case_ : Union[str, Any] = HTTPError
snake_case_ : Any = {}
# Download this model to make sure it's in the cache.
snake_case_ : Dict = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=_lowercase ) as mock_head:
snake_case_ : Tuple = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = AutoConfig.from_pretrained("""bert-base-cased""" )
snake_case_ : Dict = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_lowercase )
snake_case_ : Tuple = 2
json.dump(configuration.to_dict() , open(os.path.join(_lowercase , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
snake_case_ : Dict = AutoConfig.from_pretrained(_lowercase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
snake_case_ : Optional[Any] = ["""config.42.0.0.json"""]
snake_case_ : Tuple = 7_6_8
configuration.save_pretrained(_lowercase )
shutil.move(os.path.join(_lowercase , """config.4.0.0.json""" ) , os.path.join(_lowercase , """config.42.0.0.json""" ) )
snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(_lowercase )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
snake_case_ : Union[str, Any] = """v4.0.0"""
snake_case_ , snake_case_ : Any = new_transformers.models.auto.AutoConfig.from_pretrained(
_lowercase , return_unused_kwargs=_lowercase )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(_lowercase , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
snake_case_ : List[str] = """v3.0.0"""
snake_case_ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(_lowercase )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 58 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
__lowerCAmelCase : Tuple = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__lowerCAmelCase : Union[str, Any] = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
__lowerCAmelCase : Optional[int] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info ( self ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
    def _compute ( self , predictions=None , references=None , concatenate_texts=False ) -> Optional[Any]:
        '''simple docstring'''
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 58 | 1 |
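# A hand-checkable illustration of the formula quoted in the docstring above,
# WER = (S + D + I) / (S + D + C), via a word-level Levenshtein distance. This is
# only a sanity check written for this note; the metric itself defers to jiwer.
def word_error_rate(reference, prediction):
    ref, hyp = reference.split(), prediction.split()
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i                                   # i deletions
    for j in range(len(hyp) + 1):
        d[0][j] = j                                   # j insertions
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1,            # deletion
                          d[i][j - 1] + 1,            # insertion
                          d[i - 1][j - 1] + cost)     # substitution or match
    return d[len(ref)][len(hyp)] / len(ref)

# One substitution ("the" -> "a") out of four reference words: WER = 1 / 4.
print(word_error_rate("this is the reference", "this is a reference"))  # 0.25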
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''ClapFeatureExtractor'''
_lowerCamelCase = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(_lowercase , _lowercase )
def __call__( self , _lowercase=None , _lowercase=None , _lowercase=None , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = kwargs.pop("""sampling_rate""" , _lowercase )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
snake_case_ : Dict = self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase )
if audios is not None:
snake_case_ : str = self.feature_extractor(
_lowercase , sampling_rate=_lowercase , return_tensors=_lowercase , **_lowercase )
if text is not None and audios is not None:
snake_case_ : List[str] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowercase ) , tensor_type=_lowercase )
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : int = self.tokenizer.model_input_names
snake_case_ : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 58 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=3 , _lowercase=2_2_4 , _lowercase=3_0 , _lowercase=4_0_0 , _lowercase=True , _lowercase=None , _lowercase=True , _lowercase=[0.5, 0.5, 0.5] , _lowercase=[0.5, 0.5, 0.5] , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case_ : Union[str, Any] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Dict = num_channels
snake_case_ : Optional[Any] = image_size
snake_case_ : Optional[Any] = min_resolution
snake_case_ : List[Any] = max_resolution
snake_case_ : Union[str, Any] = do_resize
snake_case_ : Optional[int] = size
snake_case_ : Optional[Any] = do_normalize
snake_case_ : int = image_mean
snake_case_ : Dict = image_std
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = ViTImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = EfficientFormerImageProcessorTester(self )
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """image_mean""" ) )
self.assertTrue(hasattr(_lowercase , """image_std""" ) )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Optional[Any] = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
snake_case_ : int = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : int = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
snake_case_ : Dict = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Tuple = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 58 | 1 |
"""simple docstring"""
import random
def random_graph ( vertices_number : int , probability : float , directed : bool = False ):
    '''simple docstring'''
    graph : dict = {i: [] for i in range(vertices_number )}
    # if probability is greater than or equal to 1, generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is less than or equal to 0, return a graph without edges
    if probability <= 0:
        return graph
    # for each pair of nodes, add an edge from i to j
    # if the randomly generated number is less than probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, also add the edge from j to i
                    graph[j].append(i )
    return graph
def complete_graph ( vertices_number : int ):
    '''simple docstring'''
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58 |
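# Usage sketch for the two generators above: complete_graph is deterministic and easy
# to check, while random_graph's result depends on the RNG state, so only its shape is
# noted here.
print(complete_graph(3))        # {0: [1, 2], 1: [0, 2], 2: [0, 1]}
random.seed(1)
print(random_graph(4, 0.5))     # an adjacency dict over vertices 0..3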
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCAmelCase : int = TypeVar('''KT''')
__lowerCAmelCase : Union[str, Any] = TypeVar('''VT''')
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = "root" , _lowercase = None ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = key
snake_case_ : Tuple = value
snake_case_ : list[Node[KT, VT]] = []
def __repr__( self ) -> str:
'''simple docstring'''
return f'Node({self.key}: {self.value})'
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.forward )
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = 0.5 , _lowercase = 1_6 ) -> int:
'''simple docstring'''
snake_case_ : Node[KT, VT] = Node[KT, VT]()
snake_case_ : Union[str, Any] = 0
snake_case_ : Optional[int] = p
snake_case_ : Any = max_level
def __str__( self ) -> str:
'''simple docstring'''
snake_case_ : str = list(self )
if len(_lowercase ) == 0:
return f'SkipList(level={self.level})'
snake_case_ : List[Any] = max((len(str(_lowercase ) ) for item in items) , default=4 )
snake_case_ : str = max(_lowercase , 4 ) + 4
snake_case_ : Union[str, Any] = self.head
snake_case_ : Dict = []
snake_case_ : List[str] = node.forward.copy()
lines.append(f'[{node.key}]'.ljust(_lowercase , """-""" ) + """* """ * len(_lowercase ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
while len(node.forward ) != 0:
snake_case_ : Optional[Any] = node.forward[0]
lines.append(
f'[{node.key}]'.ljust(_lowercase , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
snake_case_ : List[str] = node.forward
lines.append("""None""".ljust(_lowercase ) + """* """ * len(_lowercase ) )
return f'SkipList(level={self.level})\n' + "\n".join(_lowercase )
def __iter__( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
snake_case_ : Dict = node.forward[0]
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCAmelCase__ ( self , _lowercase ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
snake_case_ : List[Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_lowercase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
for i, update_node in enumerate(_lowercase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
snake_case_ : List[str] = node.forward[i]
else:
snake_case_ : Tuple = update_node.forward[:i]
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
snake_case_ : List[Any] = value
else:
snake_case_ : Optional[int] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , _lowercase ):
update_vector.append(self.head )
snake_case_ : Any = level
snake_case_ : Optional[int] = Node(_lowercase , _lowercase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(_lowercase )
else:
snake_case_ : Optional[Any] = new_node
def UpperCAmelCase__ ( self , _lowercase ) -> VT | None:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
return node.value
return None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 1_2 )
skip_list.insert("""Key3""" , 4_1 )
skip_list.insert("""Key4""" , -1_9 )
snake_case_ : Optional[int] = skip_list.head
snake_case_ : List[Any] = {}
while node.level != 0:
snake_case_ : List[str] = node.forward[0]
snake_case_ : Union[str, Any] = node.value
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_0 )
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 1_0 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 1_0 )
snake_case_ : str = skip_list.head
snake_case_ : str = {}
while node.level != 0:
snake_case_ : Optional[Any] = node.forward[0]
snake_case_ : int = node.value
if len(__UpperCamelCase ) != 4:
print()
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : str = SkipList()
assert skip_list.find("""Some key""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = SkipList()
skip_list.insert("""Key2""" , 2_0 )
assert skip_list.find("""Key2""" ) == 2_0
skip_list.insert("""Some Key""" , 1_0 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 1_3 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 1_0
assert skip_list.find("""V""" ) == 1_3
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Tuple = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 1_4
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4_2 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""X""" )
def traverse_keys(__UpperCamelCase : str ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(__UpperCamelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __lowerCAmelCase ( ):
'''simple docstring'''
def is_sorted(__UpperCamelCase : List[Any] ):
return all(next_item >= item for item, next_item in zip(__UpperCamelCase , lst[1:] ) )
snake_case_ : str = SkipList()
for i in range(1_0 ):
skip_list.insert(__UpperCamelCase , __UpperCamelCase )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
assert is_sorted(list(__UpperCamelCase ) )
def __lowerCAmelCase ( ):
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 58 | 1 |
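# Quick empirical look at the geometric level distribution behind the random_level()
# draw used by the skip list above: with p = 0.5, roughly a fraction p**(k-1) of
# inserted nodes reaches level k. The standalone helper below just re-creates that
# draw for illustration; it is not part of the class.
import random
random.seed(0)

def draw_level(p=0.5, max_level=16):
    level = 1
    while random.random() < p and level < max_level:
        level += 1
    return level

levels = [draw_level() for _ in range(10_000)]
print(sum(lv >= 2 for lv in levels) / len(levels))  # roughly 0.5
print(sum(lv >= 3 for lv in levels) / len(levels))  # roughly 0.25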
"""simple docstring"""
import os
import numpy
import onnx
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Any = a.name
snake_case_ : Optional[Any] = b.name
snake_case_ : Dict = """"""
snake_case_ : Optional[Any] = """"""
snake_case_ : Any = a == b
snake_case_ : Optional[int] = name_a
snake_case_ : Optional[Any] = name_b
return res
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(__UpperCamelCase , __UpperCamelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , __UpperCamelCase , __UpperCamelCase )
_graph_replace_input_with(node_proto.attribute[1].g , __UpperCamelCase , __UpperCamelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , __UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple ):
'''simple docstring'''
for n in graph_proto.node:
_node_replace_input_with(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = list(model.graph.initializer )
snake_case_ : int = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
snake_case_ : Union[str, Any] = inits[i].name
snake_case_ : Union[str, Any] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , __UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : List[str] = os.path.dirname(__UpperCamelCase )
snake_case_ : Tuple = os.path.basename(__UpperCamelCase )
snake_case_ : str = onnx.load(os.path.join(__UpperCamelCase , __UpperCamelCase ) )
snake_case_ : Optional[Any] = list(model.graph.initializer )
snake_case_ : str = set()
snake_case_ : int = {}
snake_case_ : Tuple = []
snake_case_ : Union[str, Any] = 0
for i in range(len(__UpperCamelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(__UpperCamelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(__UpperCamelCase )
dup_set.add(__UpperCamelCase )
snake_case_ : Optional[Any] = inits[j].data_type
snake_case_ : Tuple = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print("""unexpected data type: """ , __UpperCamelCase )
total_reduced_size += mem_size
snake_case_ : Union[str, Any] = inits[i].name
snake_case_ : int = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(__UpperCamelCase )
else:
snake_case_ : Any = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , """GB""" )
snake_case_ : Dict = sorted(__UpperCamelCase )
_remove_dup_initializers_from_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Optional[int] = """optimized_""" + model_file_name
snake_case_ : Dict = os.path.join(__UpperCamelCase , __UpperCamelCase )
onnx.save(__UpperCamelCase , __UpperCamelCase )
return new_model
| 58 |
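# A minimal sketch of the same de-duplication idea on plain numpy weights: keep the
# first copy of each identical tensor and record how references to the duplicates
# should be remapped. Names and data are illustrative; the routine above does this for
# ONNX initializers and patches the node inputs in place.
import numpy as np

def dedup_weights(weights):
    kept = {}
    remap = {}                                   # duplicate name -> canonical name
    for name, tensor in weights.items():
        for kept_name, kept_tensor in kept.items():
            if tensor.dtype == kept_tensor.dtype and np.array_equal(tensor, kept_tensor):
                remap[name] = kept_name
                break
        else:
            kept[name] = tensor
    return kept, remap

weights = {"a": np.ones((2, 2)), "b": np.ones((2, 2)), "c": np.zeros(3)}
kept, remap = dedup_weights(weights)
print(sorted(kept), remap)  # ['a', 'c'] {'b': 'a'}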
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[Any] = '''examples/'''
__lowerCAmelCase : Union[str, Any] = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__lowerCAmelCase : Union[str, Any] = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
__lowerCAmelCase : List[Any] = '''README.md'''
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict ):
'''simple docstring'''
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : Any = f.read()
snake_case_ , snake_case_ : Optional[int] = REPLACE_PATTERNS[pattern]
snake_case_ : Union[str, Any] = replace.replace("""VERSION""" , __UpperCamelCase )
snake_case_ : List[Any] = re_pattern.sub(__UpperCamelCase , __UpperCamelCase )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(__UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , pattern="""examples""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : int=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not patch:
update_version_in_examples(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = """🤗 Transformers currently provides the following architectures"""
snake_case_ : Union[str, Any] = """1. Want to contribute a new model?"""
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : str = f.readlines()
# Find the start of the list.
snake_case_ : List[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
snake_case_ : Optional[int] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
snake_case_ : Any = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
snake_case_ : Any = f.read()
snake_case_ : Tuple = REPLACE_PATTERNS["""init"""][0].search(__UpperCamelCase ).groups()[0]
return packaging.version.parse(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str=False ):
'''simple docstring'''
snake_case_ : Union[str, Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
snake_case_ : str = default_version.base_version
elif patch:
snake_case_ : str = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
snake_case_ : str = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
snake_case_ : int = input(F'Which version are you releasing? [{default_version}]' )
if len(__UpperCamelCase ) == 0:
snake_case_ : Optional[int] = default_version
print(F'Updating version to {version}.' )
global_version_update(__UpperCamelCase , patch=__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = get_version()
snake_case_ : str = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
snake_case_ : Tuple = current_version.base_version
# Check with the user we got that right.
snake_case_ : Optional[int] = input(F'Which version are we developing now? [{dev_version}]' )
if len(__UpperCamelCase ) == 0:
snake_case_ : Dict = dev_version
print(F'Updating version to {version}.' )
global_version_update(__UpperCamelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__lowerCAmelCase : str = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 58 | 1 |
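# A small illustration of how the `init` replace pattern above rewrites a version
# string; the sample file content is made up. Note the extra blank line in the result:
# the replacement string itself ends in a newline, exactly as in REPLACE_PATTERNS.
import re

pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
replacement = '__version__ = "VERSION"\n'.replace("VERSION", "0.18.0")
sample = 'name = "diffusers"\n__version__ = "0.17.0.dev0"\nrequires = []\n'
print(pattern.sub(replacement, sample))
# name = "diffusers"
# __version__ = "0.18.0"
#
# requires = []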
"""simple docstring"""
cache : dict[tuple[int, int, int], int] = {}
def _calculate ( days : int , absent : int , late : int ):
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution ( days : int = 30 ):
    '''simple docstring'''
    return _calculate(days , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 58 |
"""simple docstring"""
def speed_of_sound_in_a_fluid ( density : float , bulk_modulus : float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58 | 1 |
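# Quick numerical check of v = (K / rho) ** 0.5 with approximate textbook values for
# water at room temperature (assumed here, not taken from the row): K ~ 2.15 GPa and
# rho ~ 998 kg/m^3 give roughly 1.47 km/s.
print((2.15e9 / 998) ** 0.5)  # ~1467.8 m/s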
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __lowerCAmelCase ( __UpperCamelCase : bytes , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Optional[Any] = F'{sampling_rate}'
snake_case_ : Dict = """1"""
snake_case_ : int = """f32le"""
snake_case_ : Dict = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
try:
with subprocess.Popen(__UpperCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
snake_case_ : Tuple = ffmpeg_process.communicate(__UpperCamelCase )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
snake_case_ : Optional[int] = output_stream[0]
snake_case_ : Optional[Any] = np.frombuffer(__UpperCamelCase , np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : float , __UpperCamelCase : str = "f32le" , ):
'''simple docstring'''
snake_case_ : Any = F'{sampling_rate}'
snake_case_ : Tuple = """1"""
if format_for_conversion == "s16le":
snake_case_ : List[Any] = 2
elif format_for_conversion == "f32le":
snake_case_ : List[Any] = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
snake_case_ : str = platform.system()
if system == "Linux":
snake_case_ : Any = """alsa"""
snake_case_ : Dict = """default"""
elif system == "Darwin":
snake_case_ : List[Any] = """avfoundation"""
snake_case_ : Optional[int] = """:0"""
elif system == "Windows":
snake_case_ : int = """dshow"""
snake_case_ : int = """default"""
snake_case_ : List[Any] = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
snake_case_ : str = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
snake_case_ : str = _ffmpeg_stream(__UpperCamelCase , __UpperCamelCase )
for item in iterator:
yield item
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : float , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[Union[Tuple[float, float], float]] = None , __UpperCamelCase : str = "f32le" , ):
'''simple docstring'''
if stream_chunk_s is not None:
snake_case_ : Tuple = stream_chunk_s
else:
snake_case_ : Dict = chunk_length_s
snake_case_ : Union[str, Any] = ffmpeg_microphone(__UpperCamelCase , __UpperCamelCase , format_for_conversion=__UpperCamelCase )
if format_for_conversion == "s16le":
snake_case_ : Tuple = np.intaa
snake_case_ : int = 2
elif format_for_conversion == "f32le":
snake_case_ : Tuple = np.floataa
snake_case_ : Optional[Any] = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
if stride_length_s is None:
snake_case_ : List[Any] = chunk_length_s / 6
snake_case_ : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__UpperCamelCase , (int, float) ):
snake_case_ : Union[str, Any] = [stride_length_s, stride_length_s]
snake_case_ : Dict = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
snake_case_ : Union[str, Any] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
snake_case_ : str = datetime.datetime.now()
snake_case_ : int = datetime.timedelta(seconds=__UpperCamelCase )
for item in chunk_bytes_iter(__UpperCamelCase , __UpperCamelCase , stride=(stride_left, stride_right) , stream=__UpperCamelCase ):
# Put everything back in numpy scale
snake_case_ : Any = np.frombuffer(item["""raw"""] , dtype=__UpperCamelCase )
snake_case_ : Dict = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
snake_case_ : Optional[int] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 1_0 * delta:
# We're late !! SKIP
continue
yield item
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Tuple[int, int] , __UpperCamelCase : bool = False ):
'''simple docstring'''
snake_case_ : Dict = B""""""
snake_case_ , snake_case_ : List[Any] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
snake_case_ : List[str] = 0
for raw in iterator:
acc += raw
if stream and len(__UpperCamelCase ) < chunk_len:
snake_case_ : Any = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__UpperCamelCase ) >= chunk_len:
# We are flushing the accumulator
snake_case_ : Any = (_stride_left, stride_right)
snake_case_ : Optional[Any] = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
snake_case_ : List[Any] = False
yield item
snake_case_ : Optional[int] = stride_left
snake_case_ : Dict = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__UpperCamelCase ) > stride_left:
snake_case_ : str = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
snake_case_ : Dict = False
yield item
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : List[str] = 2**2_4 # 16 MiB buffer
try:
with subprocess.Popen(__UpperCamelCase , stdout=subprocess.PIPE , bufsize=__UpperCamelCase ) as ffmpeg_process:
while True:
snake_case_ : List[str] = ffmpeg_process.stdout.read(__UpperCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 58 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi ( precision : int ):
'''simple docstring'''
if not isinstance(precision , int ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
getcontext().prec = precision
num_iterations = ceil(precision / 1_4 )
constant_term = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
exponential_term = 1
linear_term = 1_3_5_9_1_4_0_9
partial_sum = Decimal(linear_term )
for k in range(1 , num_iterations ):
multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
n = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
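# Editor's addition (hedged sketch): the leading digits of pi are fixed, so a prefix
# comparison is a cheap regression guard for the pi() routine above.
def _pi_sanity_check() -> None:
    assert pi(30).startswith("3.141592653589793238")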
| 58 | 1 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = MODEL_FOR_MASKED_LM_MAPPING
_lowerCamelCase = TF_MODEL_FOR_MASKED_LM_MAPPING
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : List[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
snake_case_ : Union[str, Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 3_8_0_1_5, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 2_5_5_0_6, """token_str""": """ accuser"""},
] , )
snake_case_ : List[str] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1E-05,
"""token""": 3_8_0_1_5,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1E-05,
"""token""": 2_5_5_0_6,
"""token_str""": """ accuser""",
},
] , )
snake_case_ : str = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 1_3_6_0_6, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_4_9_9, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_9_4_1, """token_str""": """ Te"""},
] , )
@require_torch
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
snake_case_ : int = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 3_5_6_7_6, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 1_6_4_1_6, """token_str""": """ELS"""},
] , )
snake_case_ : List[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2E-05,
"""token""": 3_5_6_7_6,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 1_6_4_1_6, """token_str""": """ELS"""},
] , )
snake_case_ : Tuple = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3_4_9_9, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_9_4_1, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 1_3_6_0_6, """token_str""": """ Clara"""},
] , )
snake_case_ : Optional[int] = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
[
{
"""score""": 2.2E-05,
"""token""": 3_5_6_7_6,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2E-05, """token""": 1_6_4_1_6, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2E-05,
"""token""": 3_5_6_7_6,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2E-05, """token""": 1_6_4_1_6, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Dict = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
snake_case_ : str = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_lowercase , _lowercase )
@slow
@require_torch
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Dict = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(_lowercase )
@slow
@require_tf
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : List[str] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 6_1_0, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1_5_7_3, """token_str""": """ Chris"""},
] , )
snake_case_ : List[str] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_lowercase ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2_2_0_1,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2_7_9_0,
"""token_str""": """ Lyon""",
},
] , )
snake_case_ : Any = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3_4_9_9, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3_6_0_6, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2_9_4_1, """token_str""": """ Te"""},
] , )
@require_torch
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : int = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
snake_case_ : List[Any] = None
snake_case_ : int = None
self.run_pipeline_test(_lowercase , [] )
@require_tf
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
snake_case_ : Optional[Any] = None
snake_case_ : Optional[int] = None
self.run_pipeline_test(_lowercase , [] )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> str:
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
snake_case_ : Union[str, Any] = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
snake_case_ : Tuple = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = fill_masker.tokenizer
snake_case_ : Union[str, Any] = fill_masker.model
snake_case_ : Any = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
_lowercase , [
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
] , )
snake_case_ : str = fill_masker([f'This is a {tokenizer.mask_token}'] )
self.assertEqual(
_lowercase , [
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
] , )
snake_case_ : int = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] )
self.assertEqual(
_lowercase , [
[
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
],
[
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
],
] , )
with self.assertRaises(_lowercase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_lowercase ):
fill_masker("""This is""" )
self.run_test_top_k(_lowercase , _lowercase )
self.run_test_targets(_lowercase , _lowercase )
self.run_test_top_k_targets(_lowercase , _lowercase )
self.fill_mask_with_duplicate_targets_and_top_k(_lowercase , _lowercase )
self.fill_mask_with_multiple_masks(_lowercase , _lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Tuple = tokenizer.get_vocab()
snake_case_ : int = sorted(vocab.keys() )[:2]
# Pipeline argument
snake_case_ : List[Any] = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase , targets=_lowercase )
snake_case_ : List[Any] = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
_lowercase , [
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
] , )
snake_case_ : Tuple = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _lowercase )
snake_case_ : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_lowercase ) )
# Call argument
snake_case_ : Dict = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
snake_case_ : int = fill_masker(f'This is a {tokenizer.mask_token}' , targets=_lowercase )
self.assertEqual(
_lowercase , [
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
] , )
snake_case_ : str = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _lowercase )
snake_case_ : List[Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_lowercase ) )
# Score equivalence
snake_case_ : int = fill_masker(f'This is a {tokenizer.mask_token}' , targets=_lowercase )
snake_case_ : str = [top_mask["""token_str"""] for top_mask in outputs]
snake_case_ : Optional[Any] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowercase ) == set(_lowercase ):
snake_case_ : List[Any] = fill_masker(f'This is a {tokenizer.mask_token}' , targets=_lowercase )
snake_case_ : int = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_lowercase ) , nested_simplify(_lowercase ) )
# Raises with invalid
with self.assertRaises(_lowercase ):
snake_case_ : Tuple = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_lowercase ):
snake_case_ : str = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""""""] )
with self.assertRaises(_lowercase ):
snake_case_ : Optional[Any] = fill_masker(f'This is a {tokenizer.mask_token}' , targets="""""" )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase , top_k=2 )
snake_case_ : str = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
_lowercase , [
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
] , )
snake_case_ : List[str] = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
snake_case_ : List[Any] = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
_lowercase , [
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
] , )
self.assertEqual(nested_simplify(_lowercase ) , nested_simplify(_lowercase ) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : str = tokenizer.get_vocab()
snake_case_ : int = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
# top_k=2, ntargets=3
snake_case_ : Union[str, Any] = sorted(vocab.keys() )[:3]
snake_case_ : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=_lowercase )
# If we use the most probable targets, and filter differently, we should still
# have the same results
snake_case_ : Any = [el["""token_str"""] for el in sorted(_lowercase , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowercase ).issubset(_lowercase ):
snake_case_ : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=_lowercase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_lowercase ) , nested_simplify(_lowercase ) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : int = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
snake_case_ : List[str] = tokenizer.get_vocab()
# String duplicates + id duplicates
snake_case_ : Union[str, Any] = sorted(vocab.keys() )[:3]
snake_case_ : Any = [targets[0], targets[1], targets[0], targets[2], targets[1]]
snake_case_ : List[str] = fill_masker(f'My name is {tokenizer.mask_token}' , targets=_lowercase , top_k=1_0 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_lowercase ) , 3 )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
snake_case_ : int = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
_lowercase , [
[
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
],
[
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
],
[
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
{"""sequence""": ANY(_lowercase ), """score""": ANY(_lowercase ), """token""": ANY(_lowercase ), """token_str""": ANY(_lowercase )},
],
] , )
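# Editor's addition (usage sketch, not part of the test file above): the same tiny
# checkpoint exercised by these tests can be called directly through the pipeline API;
# exact tokens and scores are model-dependent and will differ from the hard-coded values.
if __name__ == "__main__":
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
    print(unmasker("My name is <mask>"))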
| 58 |
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy ( x ):
'''simple docstring'''
exp_x = torch.exp(x )
A = torch.sum(exp_x , dim=1 ) # sum of exp(x_i)
B = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(A ) - B / A
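# Editor's addition (hedged check, not part of the original file): entropy(x) above is
# the Shannon entropy of softmax(x) per row, since H = -sum_i p_i*log(p_i) with
# p_i = exp(x_i)/sum_j exp(x_j) reduces to log(sum_j exp(x_j)) - sum_i x_i*exp(x_i)/sum_j exp(x_j).
def _entropy_sanity_check() -> None:
    logits = torch.randn(3, 5)
    p = torch.softmax(logits, dim=1)
    direct = -(p * p.log()).sum(dim=1)
    assert torch.allclose(direct, entropy(logits), atol=1e-4)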
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> int:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = config.output_attentions
snake_case_ : str = config.output_hidden_states
snake_case_ : List[str] = nn.ModuleList([BertLayer(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Tuple = nn.ModuleList([BertHighway(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Any = [-1 for _ in range(config.num_hidden_layers )]
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
if (type(_lowercase ) is float) or (type(_lowercase ) is int):
for i in range(len(self.early_exit_entropy ) ):
snake_case_ : Dict = x
else:
snake_case_ : Union[str, Any] = x
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Any:
'''simple docstring'''
snake_case_ : str = ()
snake_case_ : str = ()
snake_case_ : List[str] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
snake_case_ : int = all_hidden_states + (hidden_states,)
snake_case_ : Any = layer_module(
_lowercase , _lowercase , head_mask[i] , _lowercase , _lowercase )
snake_case_ : Dict = layer_outputs[0]
if self.output_attentions:
snake_case_ : str = all_attentions + (layer_outputs[1],)
snake_case_ : Optional[int] = (hidden_states,)
if self.output_hidden_states:
snake_case_ : Tuple = current_outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : int = current_outputs + (all_attentions,)
snake_case_ : Optional[Any] = self.highway[i](_lowercase )
# logits, pooled_output
if not self.training:
snake_case_ : Tuple = highway_exit[0]
snake_case_ : List[str] = entropy(_lowercase )
snake_case_ : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
snake_case_ : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
snake_case_ : List[Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_lowercase , i + 1 )
else:
snake_case_ : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
snake_case_ : Dict = all_hidden_states + (hidden_states,)
snake_case_ : str = (hidden_states,)
if self.output_hidden_states:
snake_case_ : List[Any] = outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : Union[str, Any] = outputs + (all_attentions,)
snake_case_ : List[str] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config
snake_case_ : int = BertEmbeddings(_lowercase )
snake_case_ : Tuple = DeeBertEncoder(_lowercase )
snake_case_ : int = BertPooler(_lowercase )
self.init_weights()
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return self.embeddings.word_embeddings
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = value
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_lowercase )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
snake_case_ : Dict = input_ids.size()
elif inputs_embeds is not None:
snake_case_ : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
snake_case_ : int = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case_ : Dict = torch.ones(_lowercase , device=_lowercase )
if encoder_attention_mask is None:
snake_case_ : Tuple = torch.ones(_lowercase , device=_lowercase )
if token_type_ids is None:
snake_case_ : Any = torch.zeros(_lowercase , dtype=torch.long , device=_lowercase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case_ : torch.Tensor = self.get_extended_attention_mask(_lowercase , _lowercase , _lowercase )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
snake_case_ : List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
snake_case_ : Any = encoder_attention_mask[:, None, None, :]
snake_case_ : List[str] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
snake_case_ : List[str] = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case_ : int = self.get_head_mask(_lowercase , self.config.num_hidden_layers )
snake_case_ : List[str] = self.embeddings(
input_ids=_lowercase , position_ids=_lowercase , token_type_ids=_lowercase , inputs_embeds=_lowercase )
snake_case_ : List[str] = self.encoder(
_lowercase , attention_mask=_lowercase , head_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )
snake_case_ : Optional[Any] = encoder_outputs[0]
snake_case_ : Union[str, Any] = self.pooler(_lowercase )
snake_case_ : Optional[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = message
snake_case_ : str = exit_layer # start from 1!
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : str = BertPooler(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Dict = nn.Linear(config.hidden_size , config.num_labels )
def UpperCAmelCase__ ( self , _lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = encoder_outputs[0]
snake_case_ : List[Any] = self.pooler(_lowercase )
# "return" pooler_output
# BertModel
snake_case_ : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
snake_case_ : Union[str, Any] = bmodel_output[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : List[str] = self.classifier(_lowercase )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config.num_labels
snake_case_ : Tuple = config.num_hidden_layers
snake_case_ : Any = DeeBertModel(_lowercase )
snake_case_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Tuple = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> int:
'''simple docstring'''
snake_case_ : int = self.num_layers
try:
snake_case_ : Any = self.bert(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
snake_case_ : str = outputs[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : Optional[int] = e.message
snake_case_ : Dict = e.exit_layer
snake_case_ : Optional[Any] = outputs[0]
if not self.training:
snake_case_ : int = entropy(_lowercase )
snake_case_ : int = []
snake_case_ : List[str] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : Dict = []
for highway_exit in outputs[-1]:
snake_case_ : List[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : List[Any] = MSELoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : str = (loss,) + outputs
if not self.training:
snake_case_ : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : str = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 58 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCAmelCase : Dict = logging.get_logger(__name__)
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''pixel_values''']
def __init__( self , _lowercase = True , _lowercase = None , _lowercase = PILImageResampling.BICUBIC , _lowercase = True , _lowercase = True , _lowercase = 1 / 2_5_5 , _lowercase = None , _lowercase = True , _lowercase = None , _lowercase = None , **_lowercase , ) -> None:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Any = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case_ : List[str] = get_size_dict(_lowercase )
snake_case_ : Any = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case_ : Optional[int] = get_size_dict(_lowercase , default_to_square=_lowercase , param_name="""crop_size""" )
snake_case_ : Optional[int] = do_resize
snake_case_ : Any = do_rescale
snake_case_ : Dict = do_normalize
snake_case_ : Dict = do_center_crop
snake_case_ : Tuple = crop_size
snake_case_ : str = size
snake_case_ : Dict = resample
snake_case_ : Optional[Any] = rescale_factor
snake_case_ : List[str] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case_ : List[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase = PILImageResampling.BILINEAR , _lowercase = None , **_lowercase , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : Union[str, Any] = get_size_dict(_lowercase )
if "shortest_edge" in size:
snake_case_ : str = get_resize_output_image_size(_lowercase , size=size["""shortest_edge"""] , default_to_square=_lowercase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
snake_case_ : Union[str, Any] = (size["""height"""], size["""width"""])
else:
raise ValueError(f'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase = None , **_lowercase , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : Optional[int] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_lowercase , size=(size["""height"""], size["""width"""]) , data_format=_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase = None , **_lowercase ) -> np.ndarray:
'''simple docstring'''
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase = None , **_lowercase , ) -> np.ndarray:
'''simple docstring'''
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = ChannelDimension.FIRST , **_lowercase , ) -> BatchFeature:
'''simple docstring'''
snake_case_ : Tuple = do_resize if do_resize is not None else self.do_resize
snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : Any = crop_size if crop_size is not None else self.crop_size
snake_case_ : Dict = get_size_dict(_lowercase , param_name="""crop_size""" , default_to_square=_lowercase )
snake_case_ : Dict = resample if resample is not None else self.resample
snake_case_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean
snake_case_ : Union[str, Any] = image_std if image_std is not None else self.image_std
snake_case_ : Optional[int] = size if size is not None else self.size
snake_case_ : Optional[int] = get_size_dict(_lowercase )
if not is_batched(_lowercase ):
snake_case_ : Any = [images]
if not valid_images(_lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
snake_case_ : Tuple = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
snake_case_ : Any = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
snake_case_ : Union[str, Any] = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
snake_case_ : Union[str, Any] = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
snake_case_ : str = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
snake_case_ : List[Any] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
snake_case_ : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
| 58 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function ( z ):
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def cost_function ( h , y ):
'''simple docstring'''
return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood ( x , y , weights ):
'''simple docstring'''
scores = np.dot(x , weights )
return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
def logistic_reg ( alpha , x , y , max_iterations=7_0_0_0_0 ):
'''simple docstring'''
theta = np.zeros(x.shape[1] )
for iterations in range(max_iterations ):
z = np.dot(x , theta )
h = sigmoid_function(z )
gradient = np.dot(x.T , h - y ) / y.size
theta = theta - alpha * gradient # updating the weights
z = np.dot(x , theta )
h = sigmoid_function(z )
j = cost_function(h , y )
if iterations % 1_0_0 == 0:
print(F'loss: {j} \t' ) # printing the loss after every 100 iterations
return theta
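# Editor's addition (hedged sketch, not part of the original sample): the update above
# uses the analytic gradient X.T @ (sigmoid(X w) - y) / m of the mean cross-entropy
# loss; a finite-difference comparison on a tiny random problem is a cheap way to
# confirm it. Relies on the sigmoid_function / cost_function helpers defined above.
def _gradient_check() -> None:
    rng = np.random.default_rng(0)
    xs = rng.normal(size=(20, 2))
    ys = (rng.random(20) > 0.5) * 1
    w = rng.normal(size=2)
    analytic = np.dot(xs.T, sigmoid_function(xs @ w) - ys) / ys.size
    eps, numeric = 1e-6, np.zeros(2)
    for i in range(2):
        step = eps * np.eye(2)[i]
        j_plus = cost_function(sigmoid_function(xs @ (w + step)), ys)
        j_minus = cost_function(sigmoid_function(xs @ (w - step)), ys)
        numeric[i] = (j_plus - j_minus) / (2 * eps)
    assert np.allclose(analytic, numeric, atol=1e-4)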
# In[68]:
if __name__ == "__main__":
iris = datasets.load_iris()
x = iris.data[:, :2]
y = (iris.target != 0) * 1
alpha = 0.1
theta = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def predict_prob ( x ):
'''simple docstring'''
return sigmoid_function(
np.dot(x , theta ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
(x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
(x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
(xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
grid = np.c_[xx1.ravel(), xx2.ravel()]
probs = predict_prob(grid).reshape(xx1.shape)
plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 58 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowerCAmelCase : List[str] = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''ernie_m'''
_lowerCamelCase = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , _lowercase = 2_5_0_0_0_2 , _lowercase = 7_6_8 , _lowercase = 1_2 , _lowercase = 1_2 , _lowercase = 3_0_7_2 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 5_1_4 , _lowercase = 0.02 , _lowercase = 1 , _lowercase = 1E-05 , _lowercase=None , _lowercase=False , _lowercase=0.0 , **_lowercase , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , **_lowercase )
snake_case_ : Optional[int] = vocab_size
snake_case_ : Dict = hidden_size
snake_case_ : Optional[int] = num_hidden_layers
snake_case_ : List[str] = num_attention_heads
snake_case_ : str = intermediate_size
snake_case_ : int = hidden_act
snake_case_ : Any = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Optional[int] = classifier_dropout
snake_case_ : List[str] = is_decoder
snake_case_ : Optional[int] = act_dropout
| 58 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__lowerCAmelCase : Tuple = '''scheduler_config.json'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 1
_lowerCamelCase = 2
_lowerCamelCase = 3
_lowerCamelCase = 4
_lowerCamelCase = 5
@dataclass
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = SCHEDULER_CONFIG_NAME
_lowerCamelCase = ['''dtype''']
_lowerCamelCase = []
_lowerCamelCase = True
@classmethod
def UpperCAmelCase__ ( cls , _lowercase = None , _lowercase = None , _lowercase=False , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ : int = cls.load_config(
pretrained_model_name_or_path=_lowercase , subfolder=_lowercase , return_unused_kwargs=_lowercase , **_lowercase , )
snake_case_ , snake_case_ : Dict = cls.from_config(_lowercase , return_unused_kwargs=_lowercase , **_lowercase )
if hasattr(_lowercase , """create_state""" ) and getattr(_lowercase , """has_state""" , _lowercase ):
snake_case_ : Any = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase__ ( self , _lowercase , _lowercase = False , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
self.save_config(save_directory=_lowercase , push_to_hub=_lowercase , **_lowercase )
@property
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def UpperCAmelCase__ ( cls ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) )
snake_case_ : str = importlib.import_module(__name__.split(""".""" )[0] )
snake_case_ : Optional[int] = [
getattr(_lowercase , _lowercase ) for c in compatible_classes_str if hasattr(_lowercase , _lowercase )
]
return compatible_classes
def __lowerCAmelCase ( __UpperCamelCase : jnp.ndarray , __UpperCamelCase : Tuple[int] ):
'''simple docstring'''
assert len(__UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(__UpperCamelCase ) - x.ndim) ) , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Any=0.999 , __UpperCamelCase : Optional[int]=jnp.floataa ):
'''simple docstring'''
def alpha_bar(__UpperCamelCase : Optional[int] ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
snake_case_ : Optional[Any] = []
for i in range(__UpperCamelCase ):
snake_case_ : Dict = i / num_diffusion_timesteps
snake_case_ : Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(__UpperCamelCase ) / alpha_bar(__UpperCamelCase ) , __UpperCamelCase ) )
return jnp.array(__UpperCamelCase , dtype=__UpperCamelCase )
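# Editor's addition (illustrative, plain Python/NumPy instead of JAX; not part of the
# original sample): the "squaredcos_cap_v2" branch derives betas from the cosine
# alpha_bar curve exactly as in the function above; a tiny standalone version is handy
# for eyeballing the values.
import math  # already imported at the top of this module; repeated for self-containment
import numpy as np

def toy_cosine_betas(num_steps: int, max_beta: float = 0.999) -> np.ndarray:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = []
    for i in range(num_steps):
        t1, t2 = i / num_steps, (i + 1) / num_steps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return np.array(betas)

print(toy_cosine_betas(5))  # small, monotonically increasing beta values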
@flax.struct.dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
@classmethod
def UpperCAmelCase__ ( cls , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : Any = scheduler.config
if config.trained_betas is not None:
snake_case_ : Optional[Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
snake_case_ : int = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
snake_case_ : str = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
snake_case_ : int = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
snake_case_ : Optional[Any] = 1.0 - betas
snake_case_ : Any = jnp.cumprod(_lowercase , axis=0 )
return cls(
alphas=_lowercase , betas=_lowercase , alphas_cumprod=_lowercase , )
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ : Tuple = state.alphas_cumprod
snake_case_ : Optional[int] = alphas_cumprod[timesteps] ** 0.5
snake_case_ : Dict = sqrt_alpha_prod.flatten()
snake_case_ : int = broadcast_to_shape_from_left(__UpperCamelCase , original_samples.shape )
snake_case_ : Optional[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
snake_case_ : Dict = sqrt_one_minus_alpha_prod.flatten()
snake_case_ : Tuple = broadcast_to_shape_from_left(__UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ , snake_case_ : str = get_sqrt_alpha_prod(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Any = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = get_sqrt_alpha_prod(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Any = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 58 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 0
_lowerCamelCase = False
_lowerCamelCase = 3.0
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} )
self.assertDictEqual(MockClass(a=2 , b=_lowercase ).to_kwargs() , {"""a""": 2, """b""": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"""a""": 2, """c""": 2.25} )
@require_cuda
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = GradScalerKwargs(init_scale=1_0_2_4 , growth_factor=2 )
AcceleratorState._reset_state()
snake_case_ : Tuple = Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
snake_case_ : int = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_0_0_0 )
self.assertEqual(scaler._enabled , _lowercase )
@require_multi_gpu
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(_lowercase , env=os.environ.copy() )
if __name__ == "__main__":
__lowerCAmelCase : Any = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
__lowerCAmelCase : Tuple = Accelerator(kwargs_handlers=[ddp_scaler])
__lowerCAmelCase : Optional[Any] = torch.nn.Linear(100, 200)
__lowerCAmelCase : Union[str, Any] = accelerator.prepare(model)
# Check the values changed in kwargs
__lowerCAmelCase : Optional[Any] = ''''''
__lowerCAmelCase : str = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 58 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : str = RobertaEmbeddings(_lowercase )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Optional[Any] = config.num_labels
snake_case_ : Dict = config.num_hidden_layers
snake_case_ : str = DeeRobertaModel(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : List[str] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = self.num_layers
try:
snake_case_ : int = self.roberta(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
snake_case_ : str = outputs[1]
snake_case_ : Union[str, Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : List[Any] = e.message
snake_case_ : Union[str, Any] = e.exit_layer
snake_case_ : Dict = outputs[0]
if not self.training:
snake_case_ : Dict = entropy(_lowercase )
snake_case_ : Optional[int] = []
snake_case_ : Union[str, Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Dict = MSELoss()
snake_case_ : Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Union[str, Any] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : int = []
for highway_exit in outputs[-1]:
snake_case_ : Tuple = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Optional[int] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : List[str] = (loss,) + outputs
if not self.training:
snake_case_ : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : Tuple = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 58 | 1 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
__lowerCAmelCase : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k: str) -> str:
    """Rewrite a TF/pegasus variable name into the corresponding Bart-style PyTorch key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
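# Illustrative example (added; the input key is hypothetical): applying PATTERNS left to right,
#     rename_state_dict_key("encoder/LayerNorm/gamma")
# first turns "/" into ".", then ".LayerNorm.gamma" into "_layer_norm.weight", and finally
# "encoder_layer_norm." into "encoder.layer_norm.", yielding "encoder.layer_norm.weight".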
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration and load the renamed TF weights into it."""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    """Load every non-optimizer variable of a TF checkpoint into a plain python dict."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
__lowerCAmelCase : Dict = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 58 |
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Check whether ``next_ver`` can extend the partial Hamiltonian path at position ``curr_ind``."""
    # 1. Validate that current and next vertices are connected
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Recursive backtracking helper that tries to complete ``path`` into a Hamiltonian cycle."""
    # Base Case: every vertex has been placed
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Return a Hamiltonian cycle starting at ``start_index``, or an empty list if none exists."""
    # initialize path with -1, length = number of vertices + 1 (the cycle returns to the start)
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
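# --- Illustrative usage (added example, not part of the original module) ---
if __name__ == "__main__":
    # a small undirected graph that is known to contain a Hamiltonian cycle
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))  # -> [0, 1, 2, 4, 3, 0]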
| 58 | 1 |
"""simple docstring"""
# NOTE: the identifiers in this snippet were mangled ("baseaa", "baaencode"); the reconstruction
# below assumes the original wrapped the standard-library base64 module's base32 codec.
import base64


def base32_encode(string: str) -> bytes:
    return base64.b32encode(string.encode("utf-8"))


def base32_decode(encoded_bytes: bytes) -> str:
    return base64.b32decode(encoded_bytes).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base32_encode(test)
    print(encoded)
    decoded = base32_decode(encoded)
    print(decoded)
| 58 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
"""simple docstring"""
_lowerCamelCase = ['''image_processor''', '''tokenizer''']
_lowerCamelCase = '''BlipImageProcessor'''
_lowerCamelCase = '''AutoTokenizer'''
    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
def __call__( self , _lowercase = None , _lowercase = None , _lowercase = True , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = 0 , _lowercase = None , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = True , _lowercase = None , **_lowercase , ) -> BatchFeature:
'''simple docstring'''
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
snake_case_ : Optional[Any] = BatchFeature()
if text is not None:
snake_case_ : List[str] = self.tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_token_type_ids=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
encoding.update(_lowercase )
snake_case_ : Union[str, Any] = self.qformer_tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_token_type_ids=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
snake_case_ : List[str] = qformer_text_encoding.pop("""input_ids""" )
snake_case_ : Union[str, Any] = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
snake_case_ : Tuple = self.image_processor(_lowercase , return_tensors=_lowercase )
encoding.update(_lowercase )
return encoding
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = self.tokenizer.model_input_names
snake_case_ : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCAmelCase__ ( self , _lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
if os.path.isfile(_lowercase ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(_lowercase , exist_ok=_lowercase )
snake_case_ : int = os.path.join(_lowercase , """qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(_lowercase )
return super().save_pretrained(_lowercase , **_lowercase )
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , **_lowercase ) -> int:
'''simple docstring'''
snake_case_ : List[str] = AutoTokenizer.from_pretrained(_lowercase , subfolder="""qformer_tokenizer""" )
snake_case_ : Union[str, Any] = cls._get_arguments_from_pretrained(_lowercase , **_lowercase )
args.append(_lowercase )
return cls(*_lowercase )
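# --- Illustrative usage sketch (added; checkpoint name and image variable are placeholders) ---
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
# inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
# # `inputs` holds pixel_values plus two tokenizations: input_ids/attention_mask from the main
# # tokenizer and the Q-Former tokenizer's ids/mask (stored under qformer_* keys upstream).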
| 58 | 1 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
'''simple docstring'''
return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
'''simple docstring'''
snake_case_ : str = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
snake_case_ : Optional[Any] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
snake_case_ : Union[str, Any] = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
snake_case_ : Optional[int] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
snake_case_ : Union[str, Any] = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
snake_case_ : Optional[Any] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
snake_case_ : int = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
snake_case_ : List[str] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
'''simple docstring'''
if split_mlp_wi:
snake_case_ : str = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
snake_case_ : Any = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
snake_case_ : Union[str, Any] = (wi_a, wi_a)
else:
snake_case_ : Dict = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
snake_case_ : List[str] = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
'''simple docstring'''
return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
'''simple docstring'''
snake_case_ : str = traverse_util.flatten_dict(variables["""target"""] )
snake_case_ : str = {"""/""".join(__UpperCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
snake_case_ : Union[str, Any] = """encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , __UpperCamelCase )
snake_case_ : List[str] = collections.OrderedDict()
# Shared embeddings.
snake_case_ : int = old["""token_embedder/embedding"""]
# Encoder.
for i in range(__UpperCamelCase ):
# Block i, layer 0 (Self Attention).
snake_case_ : Optional[int] = tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """encoder""" , """pre_attention_layer_norm""" )
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = tax_attention_lookup(__UpperCamelCase , __UpperCamelCase , """encoder""" , """attention""" )
snake_case_ : List[str] = layer_norm
snake_case_ : int = k.T
snake_case_ : List[Any] = o.T
snake_case_ : str = q.T
snake_case_ : Union[str, Any] = v.T
# Block i, layer 1 (MLP).
snake_case_ : Optional[int] = tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """encoder""" , """pre_mlp_layer_norm""" )
snake_case_ , snake_case_ : int = tax_mlp_lookup(__UpperCamelCase , __UpperCamelCase , """encoder""" , __UpperCamelCase )
snake_case_ : Dict = layer_norm
if split_mlp_wi:
snake_case_ : str = wi[0].T
snake_case_ : int = wi[1].T
else:
snake_case_ : List[str] = wi.T
snake_case_ : str = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
snake_case_ : Optional[int] = tax_relpos_bias_lookup(
__UpperCamelCase , __UpperCamelCase , """encoder""" ).T
snake_case_ : Dict = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
snake_case_ : Optional[int] = tax_relpos_bias_lookup(
__UpperCamelCase , 0 , """encoder""" ).T
snake_case_ : str = tax_relpos_bias_lookup(
__UpperCamelCase , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(__UpperCamelCase ):
# Block i, layer 0 (Self Attention).
snake_case_ : int = tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """pre_self_attention_layer_norm""" )
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Dict = tax_attention_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """self_attention""" )
snake_case_ : Union[str, Any] = layer_norm
snake_case_ : Tuple = k.T
snake_case_ : Optional[Any] = o.T
snake_case_ : Any = q.T
snake_case_ : int = v.T
# Block i, layer 1 (Cross Attention).
snake_case_ : Union[str, Any] = tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """pre_cross_attention_layer_norm""" )
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[int] = tax_attention_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """encoder_decoder_attention""" )
snake_case_ : Dict = layer_norm
snake_case_ : Tuple = k.T
snake_case_ : int = o.T
snake_case_ : List[Any] = q.T
snake_case_ : Optional[Any] = v.T
# Block i, layer 2 (MLP).
snake_case_ : Optional[int] = tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """pre_mlp_layer_norm""" )
snake_case_ , snake_case_ : List[str] = tax_mlp_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , __UpperCamelCase )
snake_case_ : List[str] = layer_norm
if split_mlp_wi:
snake_case_ : int = wi[0].T
snake_case_ : List[Any] = wi[1].T
else:
snake_case_ : str = wi.T
snake_case_ : Optional[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
snake_case_ : Optional[Any] = tax_relpos_bias_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" ).T
snake_case_ : List[str] = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
snake_case_ : Optional[Any] = old["""decoder/logits_dense/kernel"""].T
return new
def make_state_dict(converted_params, is_encoder_only: bool):
'''simple docstring'''
snake_case_ : int = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
snake_case_ : Any = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
snake_case_ : Tuple = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
snake_case_ : Union[str, Any] = state_dict["""shared.weight"""]
return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
'''simple docstring'''
snake_case_ : Optional[Any] = checkpoints.load_tax_checkpoint(__UpperCamelCase )
snake_case_ : str = convert_tax_to_pytorch(
__UpperCamelCase , num_layers=config.num_layers , is_encoder_only=__UpperCamelCase , scalable_attention=__UpperCamelCase )
snake_case_ : Optional[Any] = make_state_dict(__UpperCamelCase , __UpperCamelCase )
model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False):
'''simple docstring'''
snake_case_ : int = MTaConfig.from_json_file(__UpperCamelCase )
print(F'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
snake_case_ : Optional[Any] = UMTaEncoderModel(__UpperCamelCase )
else:
snake_case_ : int = UMTaForConditionalGeneration(__UpperCamelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__UpperCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(__UpperCamelCase )
print("""Done""" )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
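# --- Illustrative invocation (added; the script name and paths are placeholders) ---
# python <this_conversion_script>.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/output_dir \
#     --scalable_attention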
| 58 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_vivit'''] = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vivit'''] = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=7 , _lowercase=3 , _lowercase=1_8 , _lowercase=3_0 , _lowercase=4_0_0 , _lowercase=True , _lowercase=None , _lowercase=True , _lowercase=None , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = size if size is not None else {"""shortest_edge""": 2_0}
snake_case_ : Optional[int] = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case_ : Optional[Any] = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Any = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Dict = min_resolution
snake_case_ : int = max_resolution
snake_case_ : List[Any] = do_resize
snake_case_ : Dict = size
snake_case_ : Optional[Any] = do_center_crop
snake_case_ : List[str] = crop_size
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
_lowerCamelCase = MobileNetVaImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : str = MobileNetVaImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
self.assertTrue(hasattr(_lowercase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowercase , """crop_size""" ) )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 2_0} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
snake_case_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ : int = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
snake_case_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ : List[Any] = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
snake_case_ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ : List[str] = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 58 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
'''simple docstring'''
snake_case_ : List[str] = WavaVecaForSequenceClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : int = downstream_dict["""projector.weight"""]
snake_case_ : Optional[int] = downstream_dict["""projector.bias"""]
snake_case_ : List[Any] = downstream_dict["""model.post_net.linear.weight"""]
snake_case_ : Union[str, Any] = downstream_dict["""model.post_net.linear.bias"""]
return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
'''simple docstring'''
snake_case_ : int = WavaVecaForAudioFrameClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : Any = downstream_dict["""model.linear.weight"""]
snake_case_ : int = downstream_dict["""model.linear.bias"""]
return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
'''simple docstring'''
snake_case_ : Optional[int] = WavaVecaForXVector.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : Any = downstream_dict["""connector.weight"""]
snake_case_ : str = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
snake_case_ : Dict = downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
snake_case_ : int = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
snake_case_ : str = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
snake_case_ : int = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
snake_case_ : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
snake_case_ : int = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
snake_case_ : List[str] = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
'''simple docstring'''
snake_case_ : Any = torch.load(__UpperCamelCase , map_location="""cpu""" )
snake_case_ : Any = checkpoint["""Downstream"""]
snake_case_ : Optional[Any] = WavaVecaConfig.from_pretrained(__UpperCamelCase )
snake_case_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
__UpperCamelCase , return_attention_mask=__UpperCamelCase , do_normalize=__UpperCamelCase )
snake_case_ : Optional[Any] = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
snake_case_ : Tuple = convert_classification(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif arch.endswith("""ForAudioFrameClassification""" ):
snake_case_ : Union[str, Any] = convert_diarization(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif arch.endswith("""ForXVector""" ):
snake_case_ : List[str] = convert_xvector(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
snake_case_ : List[Any] = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(__UpperCamelCase )
hf_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
__lowerCAmelCase : Dict = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
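# --- Illustrative invocation (added; all values are placeholders) ---
# python <this_conversion_script>.py \
#     --base_model_name facebook/wav2vec2-base \
#     --config_path ./classifier_config.json \
#     --checkpoint_path ./s3prl_downstream_checkpoint.ckpt \
#     --model_dump_path ./converted_model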
| 58 | 1 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : Union[str, Any] = value_function
snake_case_ : Any = unet
snake_case_ : List[Any] = scheduler
snake_case_ : List[str] = env
snake_case_ : str = env.get_dataset()
snake_case_ : int = {}
for key in self.data.keys():
try:
snake_case_ : Optional[int] = self.data[key].mean()
except: # noqa: E722
pass
snake_case_ : Any = {}
for key in self.data.keys():
try:
snake_case_ : str = self.data[key].std()
except: # noqa: E722
pass
snake_case_ : Optional[Any] = env.observation_space.shape[0]
snake_case_ : Any = env.action_space.shape[0]
    def normalize(self, x_in, key):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
    def de_normalize(self, x_in, key):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
    def to_torch(self, x_in):
'''simple docstring'''
if type(_lowercase ) is dict:
return {k: self.to_torch(_lowercase ) for k, v in x_in.items()}
elif torch.is_tensor(_lowercase ):
return x_in.to(self.unet.device )
return torch.tensor(_lowercase , device=self.unet.device )
    def reset_xa(self, x_in, cond, act_dim):
'''simple docstring'''
for key, val in cond.items():
snake_case_ : Union[str, Any] = val.clone()
return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
'''simple docstring'''
snake_case_ : str = x.shape[0]
snake_case_ : Optional[int] = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
snake_case_ : int = torch.full((batch_size,) , _lowercase , device=self.unet.device , dtype=torch.long )
for _ in range(_lowercase ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
snake_case_ : Union[str, Any] = self.value_function(x.permute(0 , 2 , 1 ) , _lowercase ).sample
snake_case_ : Tuple = torch.autograd.grad([y.sum()] , [x] )[0]
snake_case_ : Optional[Any] = self.scheduler._get_variance(_lowercase )
snake_case_ : List[str] = torch.exp(0.5 * posterior_variance )
snake_case_ : Optional[int] = model_std * grad
snake_case_ : int = 0
snake_case_ : Tuple = x.detach()
snake_case_ : Any = x + scale * grad
snake_case_ : int = self.reset_xa(_lowercase , _lowercase , self.action_dim )
snake_case_ : Dict = self.unet(x.permute(0 , 2 , 1 ) , _lowercase ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
snake_case_ : Dict = self.scheduler.step(_lowercase , _lowercase , _lowercase , predict_epsilon=_lowercase )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
snake_case_ : List[Any] = self.reset_xa(_lowercase , _lowercase , self.action_dim )
snake_case_ : List[str] = self.to_torch(_lowercase )
return x, y
    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
'''simple docstring'''
snake_case_ : Union[str, Any] = self.normalize(_lowercase , """observations""" )
snake_case_ : int = obs[None].repeat(_lowercase , axis=0 )
snake_case_ : List[Any] = {0: self.to_torch(_lowercase )}
snake_case_ : Union[str, Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
snake_case_ : List[str] = randn_tensor(_lowercase , device=self.unet.device )
snake_case_ : Optional[Any] = self.reset_xa(_lowercase , _lowercase , self.action_dim )
snake_case_ : int = self.to_torch(_lowercase )
# run the diffusion process
snake_case_ , snake_case_ : int = self.run_diffusion(_lowercase , _lowercase , _lowercase , _lowercase )
# sort output trajectories by value
snake_case_ : Tuple = y.argsort(0 , descending=_lowercase ).squeeze()
snake_case_ : Any = x[sorted_idx]
snake_case_ : List[Any] = sorted_values[:, :, : self.action_dim]
snake_case_ : Optional[int] = actions.detach().cpu().numpy()
snake_case_ : int = self.de_normalize(_lowercase , key="""actions""" )
# select the action with the highest value
if y is not None:
snake_case_ : int = 0
else:
# if we didn't run value guiding, select a random action
snake_case_ : Union[str, Any] = np.random.randint(0 , _lowercase )
snake_case_ : str = denorm_actions[selected_index, 0]
return denorm_actions
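# --- Illustrative usage sketch (added; the gym environment and checkpoint id are assumptions) ---
# env = gym.make("hopper-medium-v2")  # a D4RL locomotion environment
# pipeline = ValueGuidedRLPipeline.from_pretrained(
#     "bglick13/hopper-medium-v2-value-function-hor32", env=env
# )
# obs = env.reset()
# action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)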
| 58 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : int = {'''vocab_file''': '''vocab.txt'''}
__lowerCAmelCase : Union[str, Any] = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
__lowerCAmelCase : Optional[Any] = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
__lowerCAmelCase : Any = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ConvBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
snake_case_ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _lowercase ) != tokenize_chinese_chars
):
snake_case_ : Optional[int] = getattr(_lowercase , normalizer_state.pop("""type""" ) )
snake_case_ : Dict = do_lower_case
snake_case_ : str = strip_accents
snake_case_ : Optional[Any] = tokenize_chinese_chars
snake_case_ : int = normalizer_class(**_lowercase )
snake_case_ : Optional[int] = do_lower_case
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> int:
'''simple docstring'''
snake_case_ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : int = [self.sep_token_id]
snake_case_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : Optional[int] = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
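# --- Illustrative note (added) ---
# The token-type-id helper above follows the standard BERT segment layout:
#   single sequence:  [CLS] A ... [SEP]              -> token_type_ids are all 0
#   sequence pair:    [CLS] A ... [SEP] B ... [SEP]  -> 0 for the first segment and its [SEP],
#                                                       1 for the second segment and its final [SEP]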
| 58 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 58 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
"""simple docstring"""
@register_to_config
def __init__( self , _lowercase = 1_2_8 , _lowercase = 2_5_6 , _lowercase = 2000.0 , _lowercase = 7_6_8 , _lowercase = 1_2 , _lowercase = 1_2 , _lowercase = 6_4 , _lowercase = 2_0_4_8 , _lowercase = 0.1 , ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[Any] = nn.Sequential(
nn.Linear(_lowercase , d_model * 4 , bias=_lowercase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_lowercase ) , nn.SiLU() , )
snake_case_ : Any = nn.Embedding(_lowercase , _lowercase )
snake_case_ : Union[str, Any] = False
snake_case_ : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Union[str, Any] = nn.Dropout(p=_lowercase )
snake_case_ : Tuple = nn.ModuleList()
for lyr_num in range(_lowercase ):
# FiLM conditional T5 decoder
snake_case_ : Union[str, Any] = DecoderLayer(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase )
self.decoders.append(_lowercase )
snake_case_ : List[Any] = TaLayerNorm(_lowercase )
snake_case_ : Optional[Any] = nn.Dropout(p=_lowercase )
snake_case_ : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
snake_case_ : Optional[int] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
snake_case_ : int = self.conditioning_emb(_lowercase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
snake_case_ : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
snake_case_ : Dict = torch.broadcast_to(
torch.arange(_lowercase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
snake_case_ : Tuple = self.position_encoding(_lowercase )
snake_case_ : Optional[Any] = self.continuous_inputs_projection(_lowercase )
inputs += position_encodings
snake_case_ : List[Any] = self.dropout(_lowercase )
# decoder: No padding present.
snake_case_ : Tuple = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
snake_case_ : int = [(x, self.encoder_decoder_mask(_lowercase , _lowercase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
snake_case_ : Optional[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
snake_case_ : str = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
snake_case_ : int = lyr(
_lowercase , conditioning_emb=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )[0]
snake_case_ : int = self.decoder_norm(_lowercase )
snake_case_ : Union[str, Any] = self.post_dropout(_lowercase )
snake_case_ : int = self.spec_out(_lowercase )
return spec_out
class DecoderLayer(nn.Module):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=1E-6 ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
snake_case_ : Any = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase ) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = self.layer[0](
_lowercase , conditioning_emb=_lowercase , attention_mask=_lowercase , )
if encoder_hidden_states is not None:
snake_case_ : Tuple = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
snake_case_ : str = self.layer[1](
_lowercase , key_value_states=_lowercase , attention_mask=_lowercase , )
# Apply Film Conditional Feed Forward layer
snake_case_ : Any = self.layer[-1](_lowercase , _lowercase )
return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : Any = TaLayerNorm(_lowercase )
snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase )
snake_case_ : Union[str, Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase )
snake_case_ : List[Any] = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = self.layer_norm(_lowercase )
if conditioning_emb is not None:
snake_case_ : str = self.FiLMLayer(_lowercase , _lowercase )
# Self-attention block
snake_case_ : List[Any] = self.attention(_lowercase )
snake_case_ : List[str] = hidden_states + self.dropout(_lowercase )
return hidden_states
class TaLayerCrossAttention(nn.Module):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : List[Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase )
snake_case_ : Union[str, Any] = TaLayerNorm(_lowercase , eps=_lowercase )
snake_case_ : Optional[Any] = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.layer_norm(_lowercase )
snake_case_ : Optional[Any] = self.attention(
_lowercase , encoder_hidden_states=_lowercase , attention_mask=attention_mask.squeeze(1 ) , )
snake_case_ : Any = hidden_states + self.dropout(_lowercase )
return layer_output
class TaLayerFFCond(nn.Module):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = TaDenseGatedActDense(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase )
snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase )
snake_case_ : Optional[int] = TaLayerNorm(_lowercase , eps=_lowercase )
snake_case_ : Tuple = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = self.layer_norm(_lowercase )
if conditioning_emb is not None:
snake_case_ : Optional[int] = self.film(_lowercase , _lowercase )
snake_case_ : int = self.DenseReluDense(_lowercase )
snake_case_ : Optional[Any] = hidden_states + self.dropout(_lowercase )
return hidden_states
class TaDenseGatedActDense(nn.Module):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[int] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Optional[int] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Any = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : int = nn.Dropout(_lowercase )
snake_case_ : Optional[int] = NewGELUActivation()
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : str = self.act(self.wi_a(_lowercase ) )
snake_case_ : Dict = self.wi_a(_lowercase )
snake_case_ : Any = hidden_gelu * hidden_linear
snake_case_ : List[Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.wo(_lowercase )
return hidden_states
class TaLayerNorm(nn.Module):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1E-6 ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : Union[str, Any] = nn.Parameter(torch.ones(_lowercase ) )
snake_case_ : int = eps
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=_lowercase )
snake_case_ : Any = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
snake_case_ : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class NewGELUActivation(nn.Module):
"""simple docstring"""
def UpperCAmelCase__ ( self , _lowercase ) -> torch.Tensor:
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(_lowercase , 3.0 )) ))
class TaFiLMLayer(nn.Module):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
super().__init__()
snake_case_ : List[Any] = nn.Linear(_lowercase , out_features * 2 , bias=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.scale_bias(_lowercase )
snake_case_ , snake_case_ : Any = torch.chunk(_lowercase , 2 , -1 )
snake_case_ : Optional[Any] = x * (1 + scale) + shift
return x
| 58 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_lowercase ):
snake_case_ : int = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
snake_case_ : str = FlaxAutoModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_lowercase ):
snake_case_ : str = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
snake_case_ : Dict = FlaxAutoModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained(_lowercase )
snake_case_ : Optional[int] = FlaxBertModel.from_pretrained(_lowercase )
snake_case_ : int = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**_lowercase ):
return model(**_lowercase )
eval(**_lowercase ).block_until_ready()
@slow
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
snake_case_ : Any = AutoTokenizer.from_pretrained(_lowercase )
snake_case_ : Tuple = FlaxRobertaModel.from_pretrained(_lowercase )
snake_case_ : Union[str, Any] = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**_lowercase ):
return model(**_lowercase )
eval(**_lowercase ).block_until_ready()
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
_lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
snake_case_ : Optional[int] = FlaxAutoModel.from_pretrained("""bert-base""" )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
_lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
snake_case_ : List[Any] = FlaxAutoModel.from_pretrained(_lowercase , revision="""aaaaaa""" )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
_lowercase , """hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" , ):
snake_case_ : int = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
with self.assertRaisesRegex(_lowercase , """Use `from_pt=True` to load this model""" ):
snake_case_ : Dict = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
| 58 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''roformer'''
def __init__( self , _lowercase=5_0_0_0_0 , _lowercase=None , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1_5_3_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase=0 , _lowercase=False , _lowercase=True , **_lowercase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , **_lowercase )
snake_case_ : str = vocab_size
snake_case_ : Any = hidden_size if embedding_size is None else embedding_size
snake_case_ : List[str] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : Tuple = initializer_range
snake_case_ : str = layer_norm_eps
snake_case_ : List[str] = rotary_value
snake_case_ : str = use_cache
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : Any = {0: """batch""", 1: """sequence"""}
snake_case_ : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 58 | 1 |
"""simple docstring"""
from collections import defaultdict
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : int = 1
snake_case_ : int = True
for v in tree[start]:
if v not in visited:
ret += dfs(__UpperCamelCase )
if ret % 2 == 0:
cuts.append(__UpperCamelCase )
return ret
def __lowerCAmelCase ( ):
'''simple docstring'''
dfs(1 )
if __name__ == "__main__":
__lowerCAmelCase , __lowerCAmelCase : List[Any] = 10, 9
__lowerCAmelCase : List[str] = defaultdict(list)
__lowerCAmelCase : dict[int, bool] = {}
__lowerCAmelCase : list[int] = []
__lowerCAmelCase : int = 0
__lowerCAmelCase : Optional[int] = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 58 |
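The snippet above is the classic "even tree" problem: count the edges that can be removed so that every remaining component has an even number of vertices. A cleaner, self-contained sketch of the same DFS idea (variable and function names here are my own):

from collections import defaultdict

def count_even_cuts(edges: list[tuple[int, int]], root: int = 1) -> int:
    tree: dict[int, list[int]] = defaultdict(list)
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)

    visited: set[int] = set()
    cuts: list[int] = []

    def dfs(node: int) -> int:
        visited.add(node)
        size = 1  # count the node itself
        for neighbour in tree[node]:
            if neighbour not in visited:
                size += dfs(neighbour)
        if size % 2 == 0:
            cuts.append(node)  # the edge above this node could be removed
        return size

    dfs(root)
    # The root has no edge above it, hence the -1 (mirrors the print in the snippet).
    return len(cuts) - 1

if __name__ == "__main__":
    sample = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    print(count_even_cuts(sample))  # 2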
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Dict = checkpoints.load_tax_checkpoint(__UpperCamelCase )
snake_case_ : Tuple = flatten_dict(__UpperCamelCase )
return flax_params
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = {}
snake_case_ : List[Any] = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
snake_case_ : Optional[Any] = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
snake_case_ : List[Any] = """.""".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
snake_case_ : List[str] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
snake_case_ : Optional[int] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
snake_case_ : Optional[Any] = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , __UpperCamelCase )
snake_case_ : Union[str, Any] = new_key.replace("""encoder""" , """encoder.encoder""" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
snake_case_ : int = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , __UpperCamelCase )
snake_case_ : Dict = flax_dict[key]
snake_case_ : Tuple = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
snake_case_ : Optional[int] = torch.from_numpy(converted_dict[key].T )
else:
snake_case_ : List[Any] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : List[str]=False ):
'''simple docstring'''
snake_case_ : Optional[int] = get_flax_param(__UpperCamelCase )
if not use_large:
snake_case_ : Optional[int] = PixaStructVisionConfig()
snake_case_ : Optional[Any] = PixaStructTextConfig()
else:
snake_case_ : Tuple = PixaStructVisionConfig(
hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_attention_heads=2_4 , num_hidden_layers=1_8 )
snake_case_ : List[str] = PixaStructTextConfig(hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_heads=2_4 , num_layers=1_8 )
snake_case_ : str = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__UpperCamelCase )
snake_case_ : Optional[int] = PixaStructForConditionalGeneration(__UpperCamelCase )
snake_case_ : str = rename_and_convert_flax_params(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" )
snake_case_ : int = PixaStructImageProcessor()
snake_case_ : str = PixaStructProcessor(image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase )
if use_large:
snake_case_ : Optional[Any] = 4_0_9_6
snake_case_ : int = True
# mkdir if needed
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
print("""Model saved in {}""".format(__UpperCamelCase ) )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 58 | 1 |
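The conversion script above essentially flattens a nested T5X parameter tree and renames the keys with substring mappings plus a regex rewrite of layer indices before loading them into PyTorch. A stripped-down, framework-free sketch of that renaming step (the mapping entries and key names below are illustrative, not the real Pix2Struct mapping):

import re

def rename_flat_keys(flat_params: dict, mapping: dict[str, str]) -> dict:
    """Apply substring replacements and a layer-index rewrite to flattened keys."""
    renamed = {}
    for key, value in flat_params.items():
        new_key = key
        for old, new in mapping.items():
            new_key = new_key.replace(old, new)
        # e.g. "layers_3" -> "layer.3", matching the regex used in the script above
        new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
        renamed[new_key] = value
    return renamed

# Toy example with hypothetical key names:
params = {"encoder/layers_0/kernel": 1, "encoder/encoder_norm/scale": 2}
print(rename_flat_keys(params, {"kernel": "weight", "scale": "weight", "encoder_norm": "layernorm"}))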
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
__lowerCAmelCase : Tuple = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
__lowerCAmelCase : Optional[Any] = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
@lru_cache()
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
snake_case_ : Tuple = bs[:]
snake_case_ : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
snake_case_ : Dict = [chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : List[Any] = set()
snake_case_ : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case_ : str = char
return pairs
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowercase , _lowercase , _lowercase="replace" , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase=False , **_lowercase , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
snake_case_ : Optional[int] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
snake_case_ : Dict = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
snake_case_ : List[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
snake_case_ : Any = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
snake_case_ : str = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ : Tuple = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
with open(_lowercase , encoding="""utf-8""" ) as vocab_handle:
snake_case_ : Optional[Any] = json.load(_lowercase )
snake_case_ : Any = {v: k for k, v in self.encoder.items()}
snake_case_ : Optional[Any] = errors # how to handle errors in decoding
snake_case_ : List[Any] = bytes_to_unicode()
snake_case_ : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(_lowercase , encoding="""utf-8""" ) as merges_handle:
snake_case_ : str = merges_handle.read().split("""\n""" )[1:-1]
snake_case_ : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
snake_case_ : List[Any] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case_ : Dict = {}
snake_case_ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase__ ( self , _lowercase ) -> List[str]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
snake_case_ : Optional[int] = tuple(_lowercase )
snake_case_ : List[Any] = get_pairs(_lowercase )
if not pairs:
return token
while True:
snake_case_ : Optional[int] = min(_lowercase , key=lambda _lowercase : self.bpe_ranks.get(_lowercase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case_ , snake_case_ : List[str] = bigram
snake_case_ : List[str] = []
snake_case_ : Optional[int] = 0
while i < len(_lowercase ):
try:
snake_case_ : Any = word.index(_lowercase , _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case_ : List[str] = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case_ : Union[str, Any] = tuple(_lowercase )
snake_case_ : int = new_word
if len(_lowercase ) == 1:
break
else:
snake_case_ : Tuple = get_pairs(_lowercase )
snake_case_ : Dict = """ """.join(_lowercase )
snake_case_ : Any = word
return word
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = []
for token in re.findall(self.pat , _lowercase ):
snake_case_ : Optional[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowercase ).split(""" """ ) )
return bpe_tokens
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
return self.decoder.get(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> List[str]:
'''simple docstring'''
snake_case_ : str = """""".join(_lowercase )
snake_case_ : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case_ : int = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ : Optional[int] = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + """\n""" )
snake_case_ : Optional[Any] = 0
with open(_lowercase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
snake_case_ : Union[str, Any] = token_index
writer.write(""" """.join(_lowercase ) + """\n""" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ : Dict = [self.cls_token_id]
snake_case_ : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : Optional[int] = [self.sep_token_id]
snake_case_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self , _lowercase , _lowercase=False , **_lowercase ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowercase ) > 0 and not text[0].isspace()):
snake_case_ : Any = """ """ + text
return (text, kwargs)
| 58 |
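The tokenizer above is a standard byte-level BPE. Its two small helpers, bytes_to_unicode and get_pairs, can be written compactly as below; this is a readable sketch of the same logic, not the exact library code:

def bytes_to_unicode() -> dict[int, str]:
    """Map every byte value to a printable unicode character (reversibly)."""
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    codepoints = printable[:]
    n = 0
    for b in range(256):
        if b not in printable:
            printable.append(b)
            codepoints.append(256 + n)  # shift non-printable bytes into an unused range
            n += 1
    return dict(zip(printable, [chr(c) for c in codepoints]))

def get_pairs(word: tuple[str, ...]) -> set[tuple[str, str]]:
    """Set of adjacent symbol pairs in a word, as consumed by the BPE merge loop."""
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

print(get_pairs(("h", "e", "l", "l", "o")))  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}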
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float ):
'''simple docstring'''
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(__UpperCamelCase ) * abs(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 58 | 1 |
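The function above implements the classical kinetic-energy formula KE = 0.5 * m * v**2. A self-contained restatement with a quick usage example:

def kinetic_energy(mass: float, velocity: float) -> float:
    """Kinetic energy in joules for mass in kilograms and velocity in metres per second."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)

print(kinetic_energy(10, 10))  # 500.0
print(kinetic_energy(2, -3))   # 9.0 (speed is squared, so the sign does not matter)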
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''pixel_values''']
def __init__( self , _lowercase = True , _lowercase = None , _lowercase = 0.9 , _lowercase = PILImageResampling.BICUBIC , _lowercase = True , _lowercase = None , _lowercase = 1 / 2_5_5 , _lowercase = True , _lowercase = True , _lowercase = None , _lowercase = None , **_lowercase , ) -> None:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Any = size if size is not None else {"""shortest_edge""": 2_2_4}
snake_case_ : List[str] = get_size_dict(_lowercase , default_to_square=_lowercase )
snake_case_ : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case_ : Union[str, Any] = get_size_dict(_lowercase , param_name="""crop_size""" )
snake_case_ : Optional[Any] = do_resize
snake_case_ : Optional[Any] = size
snake_case_ : Any = crop_pct
snake_case_ : Optional[int] = resample
snake_case_ : List[str] = do_center_crop
snake_case_ : List[str] = crop_size
snake_case_ : int = do_rescale
snake_case_ : Optional[Any] = rescale_factor
snake_case_ : int = do_normalize
snake_case_ : Any = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case_ : List[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase = None , _lowercase = PILImageResampling.BICUBIC , _lowercase = None , **_lowercase , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : Optional[Any] = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
if crop_pct is not None:
if "shortest_edge" in size:
snake_case_ : str = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
snake_case_ : Tuple = int(size["""height"""] / crop_pct )
else:
snake_case_ : Optional[Any] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(_lowercase ) )
snake_case_ : Dict = get_resize_output_image_size(_lowercase , size=_lowercase , default_to_square=_lowercase )
else:
if "shortest_edge" in size:
snake_case_ : int = get_resize_output_image_size(_lowercase , size=size["""shortest_edge"""] , default_to_square=_lowercase )
elif "height" in size and "width" in size:
snake_case_ : List[Any] = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(_lowercase ) )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase = None , **_lowercase , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : int = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(_lowercase , size=(size["""height"""], size["""width"""]) , data_format=_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase = None , **_lowercase , ) -> List[str]:
'''simple docstring'''
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase = None , **_lowercase , ) -> np.ndarray:
'''simple docstring'''
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = ChannelDimension.FIRST , **_lowercase , ) -> PIL.Image.Image:
'''simple docstring'''
snake_case_ : str = do_resize if do_resize is not None else self.do_resize
snake_case_ : int = crop_pct if crop_pct is not None else self.crop_pct
snake_case_ : Dict = resample if resample is not None else self.resample
snake_case_ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : Optional[Any] = image_mean if image_mean is not None else self.image_mean
snake_case_ : Dict = image_std if image_std is not None else self.image_std
snake_case_ : Union[str, Any] = size if size is not None else self.size
snake_case_ : str = get_size_dict(_lowercase , default_to_square=_lowercase )
snake_case_ : str = crop_size if crop_size is not None else self.crop_size
snake_case_ : Optional[Any] = get_size_dict(_lowercase , param_name="""crop_size""" )
snake_case_ : List[Any] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case_ : List[Any] = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
snake_case_ : List[Any] = [self.resize(image=_lowercase , size=_lowercase , crop_pct=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
snake_case_ : Optional[int] = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
snake_case_ : Any = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
snake_case_ : List[str] = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
snake_case_ : Dict = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
snake_case_ : int = {"""pixel_values""": images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
| 58 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableDiffusionInpaintPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCamelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCamelCase = frozenset([] )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=_lowercase , )
snake_case_ : Dict = PNDMScheduler(skip_prk_steps=_lowercase )
torch.manual_seed(0 )
snake_case_ : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
snake_case_ : Dict = CLIPTextModel(_lowercase )
snake_case_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self , _lowercase , _lowercase=0 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowercase ) ).to(_lowercase )
snake_case_ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Tuple = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((6_4, 6_4) )
snake_case_ : Any = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
if str(_lowercase ).startswith("""mps""" ):
snake_case_ : str = torch.manual_seed(_lowercase )
else:
snake_case_ : List[str] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
snake_case_ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ : List[str] = self.get_dummy_components()
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline(**_lowercase )
snake_case_ : Dict = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Optional[int] = self.get_dummy_inputs(_lowercase )
snake_case_ : List[str] = sd_pipe(**_lowercase ).images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ : Optional[int] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[str] = torch.manual_seed(0 )
snake_case_ : Dict = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , output_type="""np""" , )
snake_case_ : Tuple = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowercase , torch_dtype=torch.floataa , safety_checker=_lowercase , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case_ : Union[str, Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : Optional[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , output_type="""np""" , )
snake_case_ : str = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : List[str] = PNDMScheduler.from_pretrained(_lowercase , subfolder="""scheduler""" )
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowercase , safety_checker=_lowercase , scheduler=_lowercase , torch_dtype=torch.floataa , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case_ : Any = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
snake_case_ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
| 58 | 1 |
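The slow tests above exercise the standard diffusers inpainting workflow. A minimal usage sketch of that pipeline (the checkpoint name comes from the tests; the local image paths are placeholders and a CUDA device is assumed):

import torch
from PIL import Image
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
pipe.enable_attention_slicing()  # trade a little speed for lower peak memory

# Placeholder files: an RGB image and a mask whose white pixels mark the region to repaint.
init_image = Image.open("init_image.png").convert("RGB").resize((512, 512))
mask_image = Image.open("mask.png").convert("RGB").resize((512, 512))

prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
generator = torch.manual_seed(0)

result = pipe(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator)
result.images[0].save("inpainted.png")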
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=3 , _lowercase=3_2 , _lowercase=3 , _lowercase=1_0 , _lowercase=[1_0, 2_0, 3_0, 4_0] , _lowercase=[1, 1, 2, 1] , _lowercase=True , _lowercase=True , _lowercase="relu" , _lowercase=3 , _lowercase=None , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : str = batch_size
snake_case_ : Union[str, Any] = image_size
snake_case_ : str = num_channels
snake_case_ : Union[str, Any] = embeddings_size
snake_case_ : Tuple = hidden_sizes
snake_case_ : int = depths
snake_case_ : int = is_training
snake_case_ : Dict = use_labels
snake_case_ : List[str] = hidden_act
snake_case_ : List[str] = num_labels
snake_case_ : Optional[Any] = scope
snake_case_ : Tuple = len(_lowercase )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Tuple = None
if self.use_labels:
snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = TFResNetModel(config=_lowercase )
snake_case_ : Tuple = model(_lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.num_labels
snake_case_ : Optional[Any] = TFResNetForImageClassification(_lowercase )
snake_case_ : Optional[int] = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : str = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ : Dict = config_and_inputs
snake_case_ : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_lowerCamelCase = (
{'''feature-extraction''': TFResNetModel, '''image-classification''': TFResNetForImageClassification}
if is_tf_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = TFResNetModelTester(self )
snake_case_ : List[Any] = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = model_class(_lowercase )
snake_case_ : List[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Any = [*signature.parameters.keys()]
snake_case_ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
snake_case_ : str = model_class(_lowercase )
snake_case_ : Tuple = model(**self._prepare_for_class(_lowercase , _lowercase ) )
snake_case_ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case_ : Any = self.model_tester.num_stages
self.assertEqual(len(_lowercase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case_ , snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[Any] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case_ : List[str] = layer_type
snake_case_ : Any = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ : Tuple = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : str = TFResNetModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case_ : str = self.default_image_processor
snake_case_ : Optional[Any] = prepare_img()
snake_case_ : Dict = image_processor(images=_lowercase , return_tensors="""tf""" )
# forward pass
snake_case_ : Dict = model(**_lowercase )
# verify the logits
snake_case_ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _lowercase )
snake_case_ : Optional[Any] = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowercase , atol=1E-4 ) )
| 58 |
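The integration test above runs a TF ResNet classifier on a sample image. A compact inference sketch following the same steps; the checkpoint name is an assumption, since the test only references the first entry of the pretrained archive list:

import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

model_name = "microsoft/resnet-50"  # assumed checkpoint, not named explicitly in the test
processor = AutoImageProcessor.from_pretrained(model_name)
model = TFResNetForImageClassification.from_pretrained(model_name)

image = Image.open("tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")

logits = model(**inputs).logits  # shape (1, 1000)
predicted_class = int(tf.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class])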
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Optional[Any] = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
snake_case_ : Optional[int] = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
snake_case_ : Optional[Any] = F'{src_lang}-{tgt_lang}'
snake_case_ : Dict = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
snake_case_ : List[str] = os.path.join(__UpperCamelCase , """README.md""" )
print(F'Generating {path}' )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
# make sure we are under the root of the project
__lowerCAmelCase : str = Path(__file__).resolve().parent.parent.parent
__lowerCAmelCase : Optional[int] = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = model_name.split('''-''')
__lowerCAmelCase : Optional[int] = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 58 | 1 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCAmelCase : Optional[int] = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
__lowerCAmelCase : int = {
'''gpt-neox-20b''': 2048,
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase="<|endoftext|>" , _lowercase="<|endoftext|>" , _lowercase="<|endoftext|>" , _lowercase=False , **_lowercase , ) -> Any:
'''simple docstring'''
super().__init__(
_lowercase , _lowercase , tokenizer_file=_lowercase , unk_token=_lowercase , bos_token=_lowercase , eos_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
snake_case_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _lowercase ) != add_prefix_space:
snake_case_ : Any = getattr(_lowercase , pre_tok_state.pop("""type""" ) )
snake_case_ : Tuple = add_prefix_space
snake_case_ : str = pre_tok_class(**_lowercase )
snake_case_ : int = add_prefix_space
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : Any = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> List[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowercase , add_special_tokens=_lowercase ) + [self.eos_token_id] )
if len(_lowercase ) > self.model_max_length:
snake_case_ : Any = input_ids[-self.model_max_length :]
return input_ids
| 58 |
"""simple docstring"""
__lowerCAmelCase : Tuple = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowerCAmelCase : Tuple = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowerCAmelCase : Any = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : list[list[int]] ):
'''simple docstring'''
def update_area_of_max_square(__UpperCamelCase : int , __UpperCamelCase : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
snake_case_ : Tuple = update_area_of_max_square(__UpperCamelCase , col + 1 )
snake_case_ : Tuple = update_area_of_max_square(row + 1 , col + 1 )
snake_case_ : List[str] = update_area_of_max_square(row + 1 , __UpperCamelCase )
if mat[row][col]:
snake_case_ : Union[str, Any] = 1 + min([right, diagonal, down] )
snake_case_ : Any = max(largest_square_area[0] , __UpperCamelCase )
return sub_problem_sol
else:
return 0
snake_case_ : str = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : list[list[int]] ):
'''simple docstring'''
def update_area_of_max_square_using_dp_array(
__UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
snake_case_ : Optional[int] = update_area_of_max_square_using_dp_array(__UpperCamelCase , col + 1 , __UpperCamelCase )
snake_case_ : Any = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , __UpperCamelCase )
snake_case_ : Optional[Any] = update_area_of_max_square_using_dp_array(row + 1 , __UpperCamelCase , __UpperCamelCase )
if mat[row][col]:
snake_case_ : Any = 1 + min([right, diagonal, down] )
snake_case_ : str = max(largest_square_area[0] , __UpperCamelCase )
snake_case_ : str = sub_problem_sol
return sub_problem_sol
else:
return 0
snake_case_ : List[str] = [0]
snake_case_ : List[str] = [[-1] * cols for _ in range(__UpperCamelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , __UpperCamelCase )
return largest_square_area[0]
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : list[list[int]] ):
'''simple docstring'''
snake_case_ : List[str] = [[0] * (cols + 1) for _ in range(rows + 1 )]
snake_case_ : Dict = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
snake_case_ : int = dp_array[row][col + 1]
snake_case_ : Optional[int] = dp_array[row + 1][col + 1]
snake_case_ : int = dp_array[row + 1][col]
if mat[row][col] == 1:
snake_case_ : Any = 1 + min(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Union[str, Any] = max(dp_array[row][col] , __UpperCamelCase )
else:
snake_case_ : Any = 0
return largest_square_area
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : list[list[int]] ):
'''simple docstring'''
snake_case_ : List[str] = [0] * (cols + 1)
snake_case_ : int = [0] * (cols + 1)
snake_case_ : Tuple = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
snake_case_ : Dict = current_row[col + 1]
snake_case_ : List[str] = next_row[col + 1]
snake_case_ : Dict = next_row[col]
if mat[row][col] == 1:
snake_case_ : Dict = 1 + min(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : int = max(current_row[col] , __UpperCamelCase )
else:
snake_case_ : List[Any] = 0
snake_case_ : Optional[Any] = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 58 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
__lowerCAmelCase : Tuple = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__lowerCAmelCase : Union[str, Any] = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of recognition errors, so further work is required to identify the main source(s) of error and to focus any research effort.
This problem is addressed by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. The relationship has also been examined through a theory called the power law, which describes the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system, with a WER of 0 being a perfect score.
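As a worked illustration (using one minimal word-level alignment; see also the Examples section below): scoring the prediction
"this is the prediction" against the reference "this is the reference" gives S = 1, D = 0, I = 0 and N = 4, so WER = (1 + 0 + 0) / 4 = 0.25 for that pair.
Scoring it together with the second example pair ("there is an other sample" vs. "there is another one", which contributes 3 errors over 4 reference words) yields (1 + 3) / (4 + 4) = 0.5, the value shown in the Examples below.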
'''
__lowerCAmelCase : Optional[int] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=False ) -> Optional[Any]:
'''simple docstring'''
if concatenate_texts:
return compute_measures(_lowercase , _lowercase )["wer"]
else:
snake_case_ : List[str] = 0
snake_case_ : Optional[int] = 0
for prediction, reference in zip(_lowercase , _lowercase ):
snake_case_ : Optional[Any] = compute_measures(_lowercase , _lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 58 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( ):
'''simple docstring'''
for n in range(1 , 1_0_0_0_0_0_0 ):
yield n * (n + 1) // 2
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : str = 1
snake_case_ : List[str] = 2
while i * i <= n:
snake_case_ : str = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def __lowerCAmelCase ( ):
'''simple docstring'''
return next(i for i in triangle_number_generator() if count_divisors(__UpperCamelCase ) > 5_0_0 )
if __name__ == "__main__":
print(solution())
| 58 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=3 , _lowercase=2_2_4 , _lowercase=3_0 , _lowercase=4_0_0 , _lowercase=True , _lowercase=None , _lowercase=True , _lowercase=[0.5, 0.5, 0.5] , _lowercase=[0.5, 0.5, 0.5] , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case_ : Union[str, Any] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Dict = num_channels
snake_case_ : Optional[Any] = image_size
snake_case_ : Optional[Any] = min_resolution
snake_case_ : List[Any] = max_resolution
snake_case_ : Union[str, Any] = do_resize
snake_case_ : Optional[int] = size
snake_case_ : Optional[Any] = do_normalize
snake_case_ : int = image_mean
snake_case_ : Dict = image_std
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = ViTImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = EfficientFormerImageProcessorTester(self )
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """image_mean""" ) )
self.assertTrue(hasattr(_lowercase , """image_std""" ) )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Optional[Any] = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
snake_case_ : int = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : int = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
snake_case_ : Dict = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Tuple = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 58 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
def __lowerCAmelCase ( *__UpperCamelCase : str , **__UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(__UpperCamelCase , ["""torch"""] )
def __lowerCAmelCase ( *__UpperCamelCase : Any , **__UpperCamelCase : str ):
'''simple docstring'''
requires_backends(__UpperCamelCase , ["""torch"""] )
def __lowerCAmelCase ( *__UpperCamelCase : Tuple , **__UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(__UpperCamelCase , ["""torch"""] )
def __lowerCAmelCase ( *__UpperCamelCase : List[str] , **__UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(__UpperCamelCase , ["""torch"""] )
def __lowerCAmelCase ( *__UpperCamelCase : List[Any] , **__UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(__UpperCamelCase , ["""torch"""] )
def __lowerCAmelCase ( *__UpperCamelCase : List[Any] , **__UpperCamelCase : int ):
'''simple docstring'''
requires_backends(__UpperCamelCase , ["""torch"""] )
def __lowerCAmelCase ( *__UpperCamelCase : List[str] , **__UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(__UpperCamelCase , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''torch''']
def __init__( self , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
| 58 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCAmelCase : int = TypeVar('''KT''')
__lowerCAmelCase : Union[str, Any] = TypeVar('''VT''')
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = "root" , _lowercase = None ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = key
snake_case_ : Tuple = value
snake_case_ : list[Node[KT, VT]] = []
def __repr__( self ) -> str:
'''simple docstring'''
return f'Node({self.key}: {self.value})'
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.forward )
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = 0.5 , _lowercase = 1_6 ) -> int:
'''simple docstring'''
snake_case_ : Node[KT, VT] = Node[KT, VT]()
snake_case_ : Union[str, Any] = 0
snake_case_ : Optional[int] = p
snake_case_ : Any = max_level
def __str__( self ) -> str:
'''simple docstring'''
snake_case_ : str = list(self )
if len(_lowercase ) == 0:
return f'SkipList(level={self.level})'
snake_case_ : List[Any] = max((len(str(_lowercase ) ) for item in items) , default=4 )
snake_case_ : str = max(_lowercase , 4 ) + 4
snake_case_ : Union[str, Any] = self.head
snake_case_ : Dict = []
snake_case_ : List[str] = node.forward.copy()
lines.append(f'[{node.key}]'.ljust(_lowercase , """-""" ) + """* """ * len(_lowercase ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
while len(node.forward ) != 0:
snake_case_ : Optional[Any] = node.forward[0]
lines.append(
f'[{node.key}]'.ljust(_lowercase , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
snake_case_ : List[str] = node.forward
lines.append("""None""".ljust(_lowercase ) + """* """ * len(_lowercase ) )
return f'SkipList(level={self.level})\n' + "\n".join(_lowercase )
def __iter__( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
snake_case_ : Dict = node.forward[0]
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = 1
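# Promote the new node one level at a time: each extra level is added with probability p, capped at max_level.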
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCAmelCase__ ( self , _lowercase ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
snake_case_ : List[Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_lowercase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
for i, update_node in enumerate(_lowercase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
snake_case_ : List[str] = node.forward[i]
else:
snake_case_ : Tuple = update_node.forward[:i]
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
snake_case_ : List[Any] = value
else:
snake_case_ : Optional[int] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , _lowercase ):
update_vector.append(self.head )
snake_case_ : Any = level
snake_case_ : Optional[int] = Node(_lowercase , _lowercase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(_lowercase )
else:
snake_case_ : Optional[Any] = new_node
def UpperCAmelCase__ ( self , _lowercase ) -> VT | None:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
return node.value
return None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 1_2 )
skip_list.insert("""Key3""" , 4_1 )
skip_list.insert("""Key4""" , -1_9 )
snake_case_ : Optional[int] = skip_list.head
snake_case_ : List[Any] = {}
while node.level != 0:
snake_case_ : List[str] = node.forward[0]
snake_case_ : Union[str, Any] = node.value
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_0 )
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 1_0 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 1_0 )
snake_case_ : str = skip_list.head
snake_case_ : str = {}
while node.level != 0:
snake_case_ : Optional[Any] = node.forward[0]
snake_case_ : int = node.value
if len(__UpperCamelCase ) != 4:
print()
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : str = SkipList()
assert skip_list.find("""Some key""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = SkipList()
skip_list.insert("""Key2""" , 2_0 )
assert skip_list.find("""Key2""" ) == 2_0
skip_list.insert("""Some Key""" , 1_0 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 1_3 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 1_0
assert skip_list.find("""V""" ) == 1_3
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Tuple = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 1_4
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4_2 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""X""" )
def traverse_keys(__UpperCamelCase : str ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(__UpperCamelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __lowerCAmelCase ( ):
'''simple docstring'''
def is_sorted(__UpperCamelCase : List[Any] ):
return all(next_item >= item for item, next_item in zip(__UpperCamelCase , lst[1:] ) )
snake_case_ : str = SkipList()
for i in range(1_0 ):
skip_list.insert(__UpperCamelCase , __UpperCamelCase )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
assert is_sorted(list(__UpperCamelCase ) )
def __lowerCAmelCase ( ):
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 58 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float ):
'''simple docstring'''
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(__UpperCamelCase ) * abs(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 58 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[Any] = '''examples/'''
__lowerCAmelCase : Union[str, Any] = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__lowerCAmelCase : Union[str, Any] = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
__lowerCAmelCase : List[Any] = '''README.md'''
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict ):
'''simple docstring'''
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : Any = f.read()
snake_case_ , snake_case_ : Optional[int] = REPLACE_PATTERNS[pattern]
snake_case_ : Union[str, Any] = replace.replace("""VERSION""" , __UpperCamelCase )
snake_case_ : List[Any] = re_pattern.sub(__UpperCamelCase , __UpperCamelCase )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(__UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , pattern="""examples""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : int=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not patch:
update_version_in_examples(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = """🤗 Transformers currently provides the following architectures"""
snake_case_ : Union[str, Any] = """1. Want to contribute a new model?"""
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : str = f.readlines()
# Find the start of the list.
snake_case_ : List[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
snake_case_ : Optional[int] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
snake_case_ : Any = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
snake_case_ : Any = f.read()
snake_case_ : Tuple = REPLACE_PATTERNS["""init"""][0].search(__UpperCamelCase ).groups()[0]
return packaging.version.parse(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str=False ):
'''simple docstring'''
snake_case_ : Union[str, Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
snake_case_ : str = default_version.base_version
elif patch:
snake_case_ : str = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
snake_case_ : str = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
snake_case_ : int = input(F'Which version are you releasing? [{default_version}]' )
if len(__UpperCamelCase ) == 0:
snake_case_ : Optional[int] = default_version
print(F'Updating version to {version}.' )
global_version_update(__UpperCamelCase , patch=__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = get_version()
snake_case_ : str = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
snake_case_ : Tuple = current_version.base_version
# Check with the user we got that right.
snake_case_ : Optional[int] = input(F'Which version are we developing now? [{dev_version}]' )
if len(__UpperCamelCase ) == 0:
snake_case_ : Dict = dev_version
print(F'Updating version to {version}.' )
global_version_update(__UpperCamelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__lowerCAmelCase : str = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 58 | 1 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=1_6 , _lowercase=3_6 , _lowercase=6 , _lowercase=6 , _lowercase=6 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , ) -> int:
'''simple docstring'''
snake_case_ : Dict = parent
snake_case_ : Dict = batch_size
snake_case_ : str = seq_length
snake_case_ : List[str] = is_training
snake_case_ : Tuple = use_input_mask
snake_case_ : Dict = use_token_type_ids
snake_case_ : List[Any] = use_labels
snake_case_ : Optional[Any] = vocab_size
snake_case_ : List[Any] = embedding_size
snake_case_ : List[str] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Any = num_hidden_groups
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Optional[int] = intermediate_size
snake_case_ : Tuple = hidden_act
snake_case_ : str = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Tuple = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : Tuple = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : Dict = num_labels
snake_case_ : Dict = num_choices
snake_case_ : Tuple = scope
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Optional[Any] = None
if self.use_input_mask:
snake_case_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Optional[Any] = None
if self.use_token_type_ids:
snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : List[Any] = None
snake_case_ : Tuple = None
snake_case_ : Optional[Any] = None
if self.use_labels:
snake_case_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = AlbertModel(config=_lowercase )
model.to(_lowercase )
model.eval()
snake_case_ : str = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
snake_case_ : List[str] = model(_lowercase , token_type_ids=_lowercase )
snake_case_ : Dict = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = AlbertForPreTraining(config=_lowercase )
model.to(_lowercase )
model.eval()
snake_case_ : Tuple = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , sentence_order_label=_lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : List[str] = AlbertForMaskedLM(config=_lowercase )
model.to(_lowercase )
model.eval()
snake_case_ : List[Any] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = AlbertForQuestionAnswering(config=_lowercase )
model.to(_lowercase )
model.eval()
snake_case_ : Tuple = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : int = self.num_labels
snake_case_ : Any = AlbertForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
snake_case_ : Dict = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.num_labels
snake_case_ : List[str] = AlbertForTokenClassification(config=_lowercase )
model.to(_lowercase )
model.eval()
snake_case_ : Tuple = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : str = self.num_choices
snake_case_ : Any = AlbertForMultipleChoice(config=_lowercase )
model.to(_lowercase )
model.eval()
snake_case_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : Optional[int] = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowerCamelCase = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase = True
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase=False ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class in get_values(_lowercase ):
snake_case_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowercase )
snake_case_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
return inputs_dict
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = AlbertModelTester(self )
snake_case_ : str = ConfigTester(self , config_class=_lowercase , hidden_size=3_7 )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowercase )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowercase )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : List[Any] = type
self.model_tester.create_and_check_model(*_lowercase )
@slow
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Union[str, Any] = AlbertModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Any = AlbertModel.from_pretrained("""albert-base-v2""" )
snake_case_ : str = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
snake_case_ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case_ : Optional[int] = model(_lowercase , attention_mask=_lowercase )[0]
snake_case_ : int = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , _lowercase )
snake_case_ : Any = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowercase , atol=1E-4 ) )
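# Added usage sketch (not part of the original test file): the integration test above boils down
# to the following public `transformers` calls; "albert-base-v2" is the same checkpoint the test loads.
#   from transformers import AlbertModel, AlbertTokenizer
#   import torch
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   model = AlbertModel.from_pretrained("albert-base-v2")
#   inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
#   with torch.no_grad():
#       last_hidden_state = model(**inputs).last_hidden_state  # shape (1, seq_len, 768)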
| 58 |
"""simple docstring"""
def __lowerCAmelCase ( density : float , bulk_modulus : float ):
'''simple docstring'''
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
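    # Added example (input values assumed: water at room temperature, bulk modulus ~2.15 GPa,
    # density ~998 kg/m^3); the function above returns sqrt(bulk_modulus / density),
    # about 1468 m/s for these inputs.
    print(__lowerCAmelCase(density=998.0, bulk_modulus=2.15e9))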
| 58 | 1 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
"""simple docstring"""
@staticmethod
def UpperCAmelCase__ ( *_lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
snake_case_ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
snake_case_ : Dict = image_classifier(_lowercase , candidate_labels=["""a""", """b""", """c"""] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_lowercase ) , [
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
] , )
snake_case_ : Optional[int] = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
] , )
@require_tf
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
snake_case_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
snake_case_ : Any = image_classifier(_lowercase , candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}] , )
snake_case_ : str = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
] , )
@slow
@require_torch
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
snake_case_ : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
snake_case_ : Optional[int] = image_classifier(_lowercase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
snake_case_ : Optional[Any] = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
snake_case_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
snake_case_ : Dict = image_classifier(_lowercase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
snake_case_ : List[str] = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
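    # Added usage sketch (not part of the original tests): outside the test harness, the same
    # pipeline is driven like this; the model name and labels mirror the slow tests above.
    #   from transformers import pipeline
    #   from PIL import Image
    #   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    #   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    #   classifier(image, candidate_labels=["cat", "plane", "remote"])
    #   # -> a list of {"score": ..., "label": ...} dicts sorted by score, as asserted above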
| 58 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int):
    '''simple docstring'''
    if not isinstance(precision, int):
        raise TypeError("""Undefined for non-integers""")
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
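    # Added note: the loop in pi() implements the Chudnovsky series
    #   1/pi = 12 * sum_{k>=0} (-1)**k * (6k)! * (13591409 + 545140134*k) / ((3k)! * (k!)**3 * 640320**(3k + 3/2)),
    # where 426880 * sqrt(10005) equals 640320**(3/2) / 12, so pi is approximated by constant_term / partial_sum.
    # Each term contributes roughly 14 extra correct digits, hence num_iterations = ceil(precision / 14).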
| 58 | 1 |
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def __lowerCAmelCase ( fn : Callable ):
    '''simple docstring'''
    @wraps(fn )
    def _inner_fn(*args , **kwargs ):
        warnings.warn(
            (F'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') , UserWarning , )  # warning category assumed
        return fn(*args , **kwargs )
    return _inner_fn
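# Added usage sketch (hypothetical function name, not part of the original module); applying the
# decorator defined above marks an API as experimental:
#   @__lowerCAmelCase
#   def new_feature():
#       return 42
#   new_feature()  # warns: "'new_feature' is experimental and might be subject to breaking changes in the future."
# Only the warning message text is given by the original snippet; UserWarning as the category is an assumption.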
| 58 |
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
    '''simple docstring'''
    exp_x = torch.exp(__UpperCamelCase )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(__UpperCamelCase * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
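# Added illustration (not part of the original module): for a batch of logit rows x, the helper
# above returns the Shannon entropy of softmax(x):
#   H(x) = log(sum_i exp(x_i)) - (sum_i x_i * exp(x_i)) / (sum_i exp(x_i))
# For example, a uniform row [0., 0., 0.] gives H = log(3) ~ 1.0986 (maximally uncertain), while a
# confident row such as [10., 0., 0.] gives H ~ 0.002; the early-exit logic below compares this
# value against the per-layer early_exit_entropy thresholds.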
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> int:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = config.output_attentions
snake_case_ : str = config.output_hidden_states
snake_case_ : List[str] = nn.ModuleList([BertLayer(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Tuple = nn.ModuleList([BertHighway(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Any = [-1 for _ in range(config.num_hidden_layers )]
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
if (type(_lowercase ) is float) or (type(_lowercase ) is int):
for i in range(len(self.early_exit_entropy ) ):
snake_case_ : Dict = x
else:
snake_case_ : Union[str, Any] = x
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Any:
'''simple docstring'''
snake_case_ : str = ()
snake_case_ : str = ()
snake_case_ : List[str] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
snake_case_ : int = all_hidden_states + (hidden_states,)
snake_case_ : Any = layer_module(
_lowercase , _lowercase , head_mask[i] , _lowercase , _lowercase )
snake_case_ : Dict = layer_outputs[0]
if self.output_attentions:
snake_case_ : str = all_attentions + (layer_outputs[1],)
snake_case_ : Optional[int] = (hidden_states,)
if self.output_hidden_states:
snake_case_ : Tuple = current_outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : int = current_outputs + (all_attentions,)
snake_case_ : Optional[Any] = self.highway[i](_lowercase )
# logits, pooled_output
if not self.training:
snake_case_ : Tuple = highway_exit[0]
snake_case_ : List[str] = entropy(_lowercase )
snake_case_ : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
snake_case_ : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
snake_case_ : List[Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_lowercase , i + 1 )
else:
snake_case_ : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
snake_case_ : Dict = all_hidden_states + (hidden_states,)
snake_case_ : str = (hidden_states,)
if self.output_hidden_states:
snake_case_ : List[Any] = outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : Union[str, Any] = outputs + (all_attentions,)
snake_case_ : List[str] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config
snake_case_ : int = BertEmbeddings(_lowercase )
snake_case_ : Tuple = DeeBertEncoder(_lowercase )
snake_case_ : int = BertPooler(_lowercase )
self.init_weights()
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return self.embeddings.word_embeddings
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = value
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_lowercase )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
snake_case_ : Dict = input_ids.size()
elif inputs_embeds is not None:
snake_case_ : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
snake_case_ : int = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case_ : Dict = torch.ones(_lowercase , device=_lowercase )
if encoder_attention_mask is None:
snake_case_ : Tuple = torch.ones(_lowercase , device=_lowercase )
if token_type_ids is None:
snake_case_ : Any = torch.zeros(_lowercase , dtype=torch.long , device=_lowercase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case_ : torch.Tensor = self.get_extended_attention_mask(_lowercase , _lowercase , _lowercase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
snake_case_ : List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
snake_case_ : Any = encoder_attention_mask[:, None, None, :]
snake_case_ : List[str] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
snake_case_ : List[str] = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case_ : int = self.get_head_mask(_lowercase , self.config.num_hidden_layers )
snake_case_ : List[str] = self.embeddings(
input_ids=_lowercase , position_ids=_lowercase , token_type_ids=_lowercase , inputs_embeds=_lowercase )
snake_case_ : List[str] = self.encoder(
_lowercase , attention_mask=_lowercase , head_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )
snake_case_ : Optional[Any] = encoder_outputs[0]
snake_case_ : Union[str, Any] = self.pooler(_lowercase )
snake_case_ : Optional[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = message
snake_case_ : str = exit_layer # start from 1!
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : str = BertPooler(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Dict = nn.Linear(config.hidden_size , config.num_labels )
def UpperCAmelCase__ ( self , _lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = encoder_outputs[0]
snake_case_ : List[Any] = self.pooler(_lowercase )
# "return" pooler_output
# BertModel
snake_case_ : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
snake_case_ : Union[str, Any] = bmodel_output[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : List[str] = self.classifier(_lowercase )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config.num_labels
snake_case_ : Tuple = config.num_hidden_layers
snake_case_ : Any = DeeBertModel(_lowercase )
snake_case_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Tuple = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> int:
'''simple docstring'''
snake_case_ : int = self.num_layers
try:
snake_case_ : Any = self.bert(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
snake_case_ : str = outputs[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : Optional[int] = e.message
snake_case_ : Dict = e.exit_layer
snake_case_ : Optional[Any] = outputs[0]
if not self.training:
snake_case_ : int = entropy(_lowercase )
snake_case_ : int = []
snake_case_ : List[str] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : Dict = []
for highway_exit in outputs[-1]:
snake_case_ : List[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : List[Any] = MSELoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : str = (loss,) + outputs
if not self.training:
snake_case_ : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : str = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 58 | 1 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
__lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
__lowerCAmelCase : List[Any] = parser.parse_args()
__lowerCAmelCase : Dict = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
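    # Added usage sketch (script and file names are assumed; the flags are the ones defined above):
    #   python convert_original_stable_diffusion_to_diffusers.py \
    #       --checkpoint_path ./v1-5.ckpt --original_config_file ./v1-inference.yaml \
    #       --scheduler_type ddim --dump_path ./sd-v1-5-diffusers --half
    # This parses the single-file checkpoint into a diffusers pipeline, optionally casts it to
    # fp16 (--half), and writes either the full pipeline or, with --controlnet, only the
    # ControlNet weights to --dump_path.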
| 58 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    '''simple docstring'''
    return 1 / (1 + np.exp(-z))
def cost_function(h, y):
    '''simple docstring'''
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    '''simple docstring'''
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
def logistic_reg(alpha, x, y, max_iterations=70000):
    '''simple docstring'''
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f'loss: {j} \t')  # printing the loss after every 100 iterations
    return theta
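# Added note: each iteration above is one batch gradient-descent step on the logistic
# cross-entropy cost: with h = sigmoid_function(x @ theta), the update is
#   theta <- theta - alpha * x.T @ (h - y) / m,   where m = y.size.
# log_likelihood() is provided for monitoring the fit but is not used inside the update loop.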
# In[68]:
if __name__ == "__main__":
__lowerCAmelCase : Any = datasets.load_iris()
__lowerCAmelCase : List[Any] = iris.data[:, :2]
__lowerCAmelCase : Tuple = (iris.target != 0) * 1
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : List[Any] = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
return sigmoid_function(
np.dot(__UpperCamelCase , __UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((__lowerCAmelCase) , (__lowerCAmelCase)) : Union[str, Any] = (x[:, 0].min(), x[:, 0].max())
((__lowerCAmelCase) , (__lowerCAmelCase)) : Tuple = (x[:, 1].min(), x[:, 1].max())
((__lowerCAmelCase) , (__lowerCAmelCase)) : Optional[Any] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
__lowerCAmelCase : Any = np.c_[xxa.ravel(), xxa.ravel()]
__lowerCAmelCase : Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 58 | 1 |
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__lowerCAmelCase : List[Any] = '''sshleifer/mar_enro_6_3_student'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
snake_case_ : int = cached_path(
"""https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=_lowercase , )
snake_case_ : Any = f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
@slow
@require_torch_gpu
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
MarianMTModel.from_pretrained(_lowercase )
@slow
@require_torch_gpu
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Tuple = {
"""$MAX_LEN""": 6_4,
"""$BS""": 6_4,
"""$GAS""": 1,
"""$ENRO_DIR""": self.data_dir,
"""facebook/mbart-large-cc25""": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"""--learning_rate=3e-5""": """--learning_rate 3e-4""",
"""--num_train_epochs 6""": """--num_train_epochs 1""",
}
# Clean up bash script
snake_case_ : List[str] = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip()
snake_case_ : List[str] = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
for k, v in env_vars_to_replace.items():
snake_case_ : Union[str, Any] = bash_script.replace(_lowercase , str(_lowercase ) )
snake_case_ : List[Any] = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
snake_case_ : Optional[Any] = f'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
snake_case_ : Union[str, Any] = ["""finetune.py"""] + bash_script.split() + args
with patch.object(_lowercase , """argv""" , _lowercase ):
snake_case_ : Dict = argparse.ArgumentParser()
snake_case_ : List[Any] = pl.Trainer.add_argparse_args(_lowercase )
snake_case_ : List[Any] = SummarizationModule.add_model_specific_args(_lowercase , os.getcwd() )
snake_case_ : int = parser.parse_args()
snake_case_ : List[Any] = main(_lowercase )
# Check metrics
snake_case_ : Union[str, Any] = load_json(model.metrics_save_path )
snake_case_ : Optional[int] = metrics["""val"""][0]
snake_case_ : List[str] = metrics["""val"""][-1]
self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] , _lowercase )
self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["""val_avg_bleu"""] , 1_7 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
snake_case_ : Optional[Any] = os.listdir(_lowercase )
snake_case_ : Union[str, Any] = [x for x in contents if x.endswith(""".ckpt""" )][0]
snake_case_ : Any = os.path.join(args.output_dir , _lowercase )
snake_case_ : str = torch.load(_lowercase , map_location="""cpu""" )
snake_case_ : Tuple = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
snake_case_ : Dict = {os.path.basename(_lowercase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@timeout_decorator.timeout(6_0_0 )
@slow
@require_torch_gpu
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = f'{self.test_file_dir_str}/test_data/wmt_en_ro'
snake_case_ : List[Any] = {
"""--fp16_opt_level=O1""": """""",
"""$MAX_LEN""": 1_2_8,
"""$BS""": 1_6,
"""$GAS""": 1,
"""$ENRO_DIR""": data_dir,
"""$m""": """sshleifer/student_marian_en_ro_6_1""",
"""val_check_interval=0.25""": """val_check_interval=1.0""",
}
# Clean up bash script
snake_case_ : Any = (
(self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
)
snake_case_ : Union[str, Any] = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
snake_case_ : int = bash_script.replace("""--fp16 """ , """ """ )
for k, v in env_vars_to_replace.items():
snake_case_ : Optional[Any] = bash_script.replace(_lowercase , str(_lowercase ) )
snake_case_ : Any = self.get_auto_remove_tmp_dir()
snake_case_ : Dict = bash_script.replace("""--fp16""" , """""" )
snake_case_ : Union[str, Any] = 6
snake_case_ : Optional[Any] = (
["""distillation.py"""]
+ bash_script.split()
+ [
f'--output_dir={output_dir}',
"""--gpus=1""",
"""--learning_rate=1e-3""",
f'--num_train_epochs={epochs}',
"""--warmup_steps=10""",
"""--val_check_interval=1.0""",
"""--do_predict""",
]
)
with patch.object(_lowercase , """argv""" , _lowercase ):
snake_case_ : int = argparse.ArgumentParser()
snake_case_ : List[Any] = pl.Trainer.add_argparse_args(_lowercase )
snake_case_ : Union[str, Any] = SummarizationDistiller.add_model_specific_args(_lowercase , os.getcwd() )
snake_case_ : List[Any] = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
snake_case_ : int = distill_main(_lowercase )
# Check metrics
snake_case_ : Dict = load_json(model.metrics_save_path )
snake_case_ : int = metrics["""val"""][0]
snake_case_ : List[Any] = metrics["""val"""][-1]
assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] , _lowercase )
# check lightning ckpt can be loaded and has a reasonable statedict
snake_case_ : int = os.listdir(_lowercase )
snake_case_ : Any = [x for x in contents if x.endswith(""".ckpt""" )][0]
snake_case_ : List[str] = os.path.join(args.output_dir , _lowercase )
snake_case_ : Dict = torch.load(_lowercase , map_location="""cpu""" )
snake_case_ : Dict = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
snake_case_ : Tuple = {os.path.basename(_lowercase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
| 58 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__lowerCAmelCase : Tuple = '''scheduler_config.json'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 1
_lowerCamelCase = 2
_lowerCamelCase = 3
_lowerCamelCase = 4
_lowerCamelCase = 5
@dataclass
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = SCHEDULER_CONFIG_NAME
_lowerCamelCase = ['''dtype''']
_lowerCamelCase = []
_lowerCamelCase = True
@classmethod
def UpperCAmelCase__ ( cls , _lowercase = None , _lowercase = None , _lowercase=False , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ : int = cls.load_config(
pretrained_model_name_or_path=_lowercase , subfolder=_lowercase , return_unused_kwargs=_lowercase , **_lowercase , )
snake_case_ , snake_case_ : Dict = cls.from_config(_lowercase , return_unused_kwargs=_lowercase , **_lowercase )
if hasattr(_lowercase , """create_state""" ) and getattr(_lowercase , """has_state""" , _lowercase ):
snake_case_ : Any = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase__ ( self , _lowercase , _lowercase = False , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
self.save_config(save_directory=_lowercase , push_to_hub=_lowercase , **_lowercase )
@property
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def UpperCAmelCase__ ( cls ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) )
snake_case_ : str = importlib.import_module(__name__.split(""".""" )[0] )
snake_case_ : Optional[int] = [
getattr(_lowercase , _lowercase ) for c in compatible_classes_str if hasattr(_lowercase , _lowercase )
]
return compatible_classes
def __lowerCAmelCase ( x : jnp.ndarray , shape : Tuple[int] ):
    '''simple docstring'''
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def __lowerCAmelCase ( num_diffusion_timesteps , max_beta=0.999 , dtype=jnp.float32 ):
    '''simple docstring'''
    def alpha_bar(time_step ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
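# Added note: the helper above implements the cosine ("squaredcos_cap_v2") beta schedule, where
#   alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2
# and beta_i = min(1 - alpha_bar((i + 1) / N) / alpha_bar(i / N), max_beta), so the cumulative
# product of (1 - beta_i) tracks the cosine curve over the N diffusion timesteps.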
@flax.struct.dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
@classmethod
def UpperCAmelCase__ ( cls , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : Any = scheduler.config
if config.trained_betas is not None:
snake_case_ : Optional[Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
snake_case_ : int = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
snake_case_ : str = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
snake_case_ : int = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
snake_case_ : Optional[Any] = 1.0 - betas
snake_case_ : Any = jnp.cumprod(_lowercase , axis=0 )
return cls(
alphas=_lowercase , betas=_lowercase , alphas_cumprod=_lowercase , )
def __lowerCAmelCase ( state : CommonSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray ):
    '''simple docstring'''
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __lowerCAmelCase ( state : CommonSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray ):
    '''simple docstring'''
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def __lowerCAmelCase ( state : CommonSchedulerState , sample : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray ):
    '''simple docstring'''
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
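# Added note: with a_t = alphas_cumprod[t], the two helpers above implement the standard diffusion
# forward-noising step and the v-prediction target:
#   noisy_sample = sqrt(a_t) * x_0 + sqrt(1 - a_t) * noise
#   velocity     = sqrt(a_t) * noise - sqrt(1 - a_t) * x_0
# with the per-timestep scalars broadcast over the sample shape so they apply to every pixel/latent.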
| 58 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''blip_2_vision_model'''
def __init__( self , _lowercase=1_4_0_8 , _lowercase=6_1_4_4 , _lowercase=3_9 , _lowercase=1_6 , _lowercase=2_2_4 , _lowercase=1_4 , _lowercase="gelu" , _lowercase=0.0_0001 , _lowercase=0.0 , _lowercase=1E-10 , _lowercase=True , **_lowercase , ) -> Any:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : int = hidden_size
snake_case_ : Any = intermediate_size
snake_case_ : Any = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Any = patch_size
snake_case_ : Dict = image_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : Union[str, Any] = attention_dropout
snake_case_ : Optional[Any] = layer_norm_eps
snake_case_ : int = hidden_act
snake_case_ : Optional[Any] = qkv_bias
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , **_lowercase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowercase )
snake_case_ , snake_case_ : Tuple = cls.get_config_dict(_lowercase , **_lowercase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
snake_case_ : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowercase , **_lowercase )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''blip_2_qformer'''
def __init__( self , _lowercase=3_0_5_2_2 , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase=0 , _lowercase="absolute" , _lowercase=2 , _lowercase=1_4_0_8 , **_lowercase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , **_lowercase )
snake_case_ : Optional[Any] = vocab_size
snake_case_ : Optional[int] = hidden_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : str = hidden_act
snake_case_ : int = intermediate_size
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : Dict = max_position_embeddings
snake_case_ : List[str] = initializer_range
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : str = position_embedding_type
snake_case_ : List[str] = cross_attention_frequency
snake_case_ : Union[str, Any] = encoder_hidden_size
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , **_lowercase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowercase )
snake_case_ , snake_case_ : Optional[int] = cls.get_config_dict(_lowercase , **_lowercase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
snake_case_ : str = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowercase , **_lowercase )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''blip-2'''
_lowerCamelCase = True
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=3_2 , **_lowercase ) -> Any:
'''simple docstring'''
super().__init__(**_lowercase )
if vision_config is None:
snake_case_ : Optional[int] = {}
logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" )
if qformer_config is None:
snake_case_ : Union[str, Any] = {}
logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" )
if text_config is None:
snake_case_ : Tuple = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
snake_case_ : Tuple = BlipaVisionConfig(**_lowercase )
snake_case_ : Union[str, Any] = BlipaQFormerConfig(**_lowercase )
snake_case_ : Union[str, Any] = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
snake_case_ : Optional[int] = CONFIG_MAPPING[text_model_type](**_lowercase )
snake_case_ : Optional[int] = self.text_config.tie_word_embeddings
snake_case_ : Dict = self.text_config.is_encoder_decoder
snake_case_ : Any = num_query_tokens
snake_case_ : List[str] = self.vision_config.hidden_size
snake_case_ : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
snake_case_ : Dict = 1.0
snake_case_ : Optional[Any] = 0.02
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , _lowercase , _lowercase , **_lowercase , ) -> Dict:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_lowercase , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Optional[int] = self.vision_config.to_dict()
snake_case_ : Union[str, Any] = self.qformer_config.to_dict()
snake_case_ : Dict = self.text_config.to_dict()
snake_case_ : Tuple = self.__class__.model_type
return output
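# Added usage sketch (assumes the public `transformers` BLIP-2 config classes that this file
# mirrors; the names below are the upstream ones, not the renamed classes in this snippet):
#   from transformers import Blip2Config, Blip2VisionConfig, Blip2QFormerConfig, OPTConfig
#   config = Blip2Config.from_vision_qformer_text_configs(
#       Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig()
#   )
#   config.num_query_tokens  # 32 by default, matching the __init__ above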
| 58 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : str = RobertaEmbeddings(_lowercase )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Optional[Any] = config.num_labels
snake_case_ : Dict = config.num_hidden_layers
snake_case_ : str = DeeRobertaModel(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : List[str] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = self.num_layers
try:
snake_case_ : int = self.roberta(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
snake_case_ : str = outputs[1]
snake_case_ : Union[str, Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : List[Any] = e.message
snake_case_ : Union[str, Any] = e.exit_layer
snake_case_ : Dict = outputs[0]
if not self.training:
snake_case_ : Dict = entropy(_lowercase )
snake_case_ : Optional[int] = []
snake_case_ : Union[str, Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Dict = MSELoss()
snake_case_ : Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Union[str, Any] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : int = []
for highway_exit in outputs[-1]:
snake_case_ : Tuple = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Optional[int] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : List[str] = (loss,) + outputs
if not self.training:
snake_case_ : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : Tuple = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 58 | 1 |
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    '''simple docstring'''
    if not isinstance(series, list):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""")
    if len(series) == 0:
        raise ValueError("""Input list must be a non empty list""")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    '''simple docstring'''
    if not isinstance(series, list):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""")
    if len(series) == 0:
        raise ValueError("""Input list must be a non empty list""")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
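# Usage sketch (added for illustration; uses the function names restored above):
# >>> is_arithmetic_series([2, 4, 6])
# True
# >>> is_arithmetic_series([2, 4, 7])
# False
# >>> arithmetic_mean([2, 4, 6])
# 4.0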
| 58 |
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    '''simple docstring'''
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)
def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    '''simple docstring'''
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    '''simple docstring'''
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
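# Usage sketch (added for illustration; the adjacency matrix below is a hypothetical example):
# >>> graph = [
# ...     [0, 1, 0, 1, 0],
# ...     [1, 0, 1, 1, 1],
# ...     [0, 1, 0, 0, 1],
# ...     [1, 1, 0, 0, 1],
# ...     [0, 1, 1, 1, 0],
# ... ]
# >>> hamilton_cycle(graph)
# [0, 1, 2, 4, 3, 0]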
| 58 | 1 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    '''transformers''',
    os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'''CLIPConfigMixin''',
'''DecisionTransformerConfigMixin''',
'''EncoderDecoderConfigMixin''',
'''RagConfigMixin''',
'''SpeechEncoderDecoderConfigMixin''',
'''VisionEncoderDecoderConfigMixin''',
'''VisionTextDualEncoderConfigMixin''',
}
def check_config_docstrings_have_checkpoints():
    '''simple docstring'''
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = """\n""".join(sorted(configs_without_checkpoint))
        raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}')
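# Quick illustration (hypothetical docstring snippet) of what the regex extracts:
# _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]
# A config docstring whose link does not match its checkpoint name leaves `checkpoint_found`
# False, so that config class ends up in `configs_without_checkpoint`.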
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 58 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''image_processor''', '''tokenizer''']
_lowerCamelCase = '''BlipImageProcessor'''
_lowerCamelCase = '''AutoTokenizer'''
def __init__( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
super().__init__(_lowercase , _lowercase )
# add QFormer tokenizer
snake_case_ : List[str] = qformer_tokenizer
def __call__( self , _lowercase = None , _lowercase = None , _lowercase = True , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = 0 , _lowercase = None , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = True , _lowercase = None , **_lowercase , ) -> BatchFeature:
'''simple docstring'''
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
snake_case_ : Optional[Any] = BatchFeature()
if text is not None:
snake_case_ : List[str] = self.tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_token_type_ids=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
encoding.update(_lowercase )
snake_case_ : Union[str, Any] = self.qformer_tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_token_type_ids=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
snake_case_ : List[str] = qformer_text_encoding.pop("""input_ids""" )
snake_case_ : Union[str, Any] = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
snake_case_ : Tuple = self.image_processor(_lowercase , return_tensors=_lowercase )
encoding.update(_lowercase )
return encoding
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = self.tokenizer.model_input_names
snake_case_ : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCAmelCase__ ( self , _lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
if os.path.isfile(_lowercase ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(_lowercase , exist_ok=_lowercase )
snake_case_ : int = os.path.join(_lowercase , """qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(_lowercase )
return super().save_pretrained(_lowercase , **_lowercase )
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , **_lowercase ) -> int:
'''simple docstring'''
snake_case_ : List[str] = AutoTokenizer.from_pretrained(_lowercase , subfolder="""qformer_tokenizer""" )
snake_case_ : Union[str, Any] = cls._get_arguments_from_pretrained(_lowercase , **_lowercase )
args.append(_lowercase )
return cls(*_lowercase )
| 58 | 1 |
"""simple docstring"""
from sklearn.metrics import f1_score as fa_score, matthews_corrcoef  # the helpers below refer to f1_score as fa_score
import datasets
from .record_evaluation import evaluate as evaluate_record
__lowerCAmelCase : str = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
__lowerCAmelCase : Dict = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
__lowerCAmelCase : Tuple = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    '''simple docstring'''
    return float((preds == labels).mean())
def acc_and_fa(preds, labels, fa_avg="binary"):
    '''simple docstring'''
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }
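# Usage sketch (illustrative numpy arrays, using the names defined above):
# import numpy as np
# acc_and_fa(np.array([0, 1, 1]), np.array([0, 1, 0]))
# -> {'accuracy': 0.666..., 'f1': 0.666...}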
def evaluate_multirc(ids_preds, labels):
    '''simple docstring'''
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["""prediction"""]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = fa_score(y_true=question_labels, y_pred=question_preds, average="""macro""")
        fas.append(fa)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    fa_a = float(fa_score(y_true=labels, y_pred=[id_pred["""prediction"""] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_lowercase , _lowercase )}
elif self.config_name == "cb":
return acc_and_fa(_lowercase , _lowercase , fa_avg="""macro""" )
elif self.config_name == "record":
snake_case_ : List[str] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
snake_case_ : List[str] = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(_lowercase , _lowercase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_lowercase , _lowercase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 58 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase : List[Any] = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58 | 1 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    '''simple docstring'''
    if version.parse(hfh.__version__).release < version.parse("""0.11.0""").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="""dataset""", revision=revision)
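# Usage sketch (repo id and file name are placeholders): with a recent huggingface_hub this
# resolves to something like
# hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet")
#   -> "https://huggingface.co/datasets/squad/resolve/main/plain_text/train-00000-of-00001.parquet"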
| 58 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[str] = WavaVecaForSequenceClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : int = downstream_dict["""projector.weight"""]
snake_case_ : Optional[int] = downstream_dict["""projector.bias"""]
snake_case_ : List[Any] = downstream_dict["""model.post_net.linear.weight"""]
snake_case_ : Union[str, Any] = downstream_dict["""model.post_net.linear.bias"""]
return model
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = WavaVecaForAudioFrameClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : Any = downstream_dict["""model.linear.weight"""]
snake_case_ : int = downstream_dict["""model.linear.bias"""]
return model
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = WavaVecaForXVector.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : Any = downstream_dict["""connector.weight"""]
snake_case_ : str = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
snake_case_ : Dict = downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
snake_case_ : int = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
snake_case_ : str = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
snake_case_ : int = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
snake_case_ : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
snake_case_ : int = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
snake_case_ : List[str] = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Any = torch.load(__UpperCamelCase , map_location="""cpu""" )
snake_case_ : Any = checkpoint["""Downstream"""]
snake_case_ : Optional[Any] = WavaVecaConfig.from_pretrained(__UpperCamelCase )
snake_case_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
__UpperCamelCase , return_attention_mask=__UpperCamelCase , do_normalize=__UpperCamelCase )
snake_case_ : Optional[Any] = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
snake_case_ : Tuple = convert_classification(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif arch.endswith("""ForAudioFrameClassification""" ):
snake_case_ : Union[str, Any] = convert_diarization(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif arch.endswith("""ForXVector""" ):
snake_case_ : List[str] = convert_xvector(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
snake_case_ : List[Any] = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(__UpperCamelCase )
hf_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
__lowerCAmelCase : Dict = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
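# Example invocation (script name, paths and model name are placeholders for illustration):
# python convert_s3prl_checkpoint.py \
#     --base_model_name facebook/wav2vec2-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_downstream.ckpt \
#     --model_dump_path ./converted_model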
| 58 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : List[str] = filter(lambda __UpperCamelCase : p.requires_grad , model.parameters() )
snake_case_ : List[str] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__lowerCAmelCase : Tuple = logging.getLogger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if metric == "rouge2":
snake_case_ : List[str] = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
snake_case_ : Optional[int] = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
snake_case_ : Union[str, Any] = """{val_avg_em:.4f}-{step_count}"""
elif metric == "loss":
snake_case_ : int = """{val_avg_loss:.4f}-{step_count}"""
else:
raise NotImplementedError(
F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
""" function.""" )
snake_case_ : Union[str, Any] = ModelCheckpoint(
dirpath=__UpperCamelCase , filename=__UpperCamelCase , monitor=F'val_{metric}' , mode="""max""" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : int ):
'''simple docstring'''
return EarlyStopping(
monitor=F'val_{metric}' , mode="""min""" if """loss""" in metric else """max""" , patience=__UpperCamelCase , verbose=__UpperCamelCase , )
class _lowerCAmelCase ( pl.Callback ):
"""simple docstring"""
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = {f'lr_group_{i}': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_lowercase )
@rank_zero_only
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase=True ) -> None:
'''simple docstring'''
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
snake_case_ : List[str] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
snake_case_ : List[str] = Path(pl_module.hparams.output_dir )
if type_path == "test":
snake_case_ : Dict = od / """test_results.txt"""
snake_case_ : Dict = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
snake_case_ : str = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
snake_case_ : Tuple = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=_lowercase )
generations_file.parent.mkdir(exist_ok=_lowercase )
with open(_lowercase , """a+""" ) as writer:
for key in sorted(_lowercase ):
if key in ["log", "progress_bar", "preds"]:
continue
snake_case_ : int = metrics[key]
if isinstance(_lowercase , torch.Tensor ):
snake_case_ : Optional[Any] = val.item()
snake_case_ : int = f'{key}: {val:.6f}\n'
writer.write(_lowercase )
if not save_generations:
return
if "preds" in metrics:
snake_case_ : Any = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(_lowercase )
@rank_zero_only
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
try:
snake_case_ : str = pl_module.model.model.num_parameters()
except AttributeError:
snake_case_ : List[str] = pl_module.model.num_parameters()
snake_case_ : str = count_trainable_parameters(_lowercase )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} )
@rank_zero_only
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_lowercase , _lowercase , """test""" )
@rank_zero_only
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 58 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : int = {'''vocab_file''': '''vocab.txt'''}
__lowerCAmelCase : Union[str, Any] = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
__lowerCAmelCase : Optional[Any] = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
__lowerCAmelCase : Any = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ConvBertTokenizer
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=True , _lowercase="[UNK]" , _lowercase="[SEP]" , _lowercase="[PAD]" , _lowercase="[CLS]" , _lowercase="[MASK]" , _lowercase=True , _lowercase=None , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
snake_case_ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _lowercase ) != tokenize_chinese_chars
):
snake_case_ : Optional[int] = getattr(_lowercase , normalizer_state.pop("""type""" ) )
snake_case_ : Dict = do_lower_case
snake_case_ : str = strip_accents
snake_case_ : Optional[Any] = tokenize_chinese_chars
snake_case_ : int = normalizer_class(**_lowercase )
snake_case_ : Optional[int] = do_lower_case
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> int:
'''simple docstring'''
snake_case_ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : int = [self.sep_token_id]
snake_case_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : Optional[int] = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 58 | 1 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
def decorator(__UpperCamelCase : List[Any] ):
snake_case_ : int = getattr(__UpperCamelCase , """handle_key""" , [] )
handle += [key]
setattr(__UpperCamelCase , """handle_key""" , __UpperCamelCase )
return func
return decorator
def __lowerCAmelCase ( *__UpperCamelCase : List[str] ):
'''simple docstring'''
def decorator(__UpperCamelCase : List[Any] ):
snake_case_ : Optional[Any] = getattr(__UpperCamelCase , """handle_key""" , [] )
handle += keys
setattr(__UpperCamelCase , """handle_key""" , __UpperCamelCase )
return func
return decorator
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __new__( cls , _lowercase , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ : str = super().__new__(cls , _lowercase , _lowercase , _lowercase )
if not hasattr(_lowercase , """key_handler""" ):
setattr(_lowercase , """key_handler""" , {} )
setattr(_lowercase , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
snake_case_ : Optional[Any] = getattr(_lowercase , """handle_key""" , [] )
for key in handled_keys:
snake_case_ : Optional[Any] = value
return new_cls
@staticmethod
def UpperCAmelCase__ ( cls ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = get_character()
if char != KEYMAP["undefined"]:
snake_case_ : List[Any] = ord(_lowercase )
snake_case_ : int = cls.key_handler.get(_lowercase )
if handler:
snake_case_ : Tuple = char
return handler(cls )
else:
return None
def __lowerCAmelCase ( cls : Dict ):
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 58 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@register_to_config
def __init__( self , _lowercase = 1_2_8 , _lowercase = 2_5_6 , _lowercase = 2000.0 , _lowercase = 7_6_8 , _lowercase = 1_2 , _lowercase = 1_2 , _lowercase = 6_4 , _lowercase = 2_0_4_8 , _lowercase = 0.1 , ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[Any] = nn.Sequential(
nn.Linear(_lowercase , d_model * 4 , bias=_lowercase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_lowercase ) , nn.SiLU() , )
snake_case_ : Any = nn.Embedding(_lowercase , _lowercase )
snake_case_ : Union[str, Any] = False
snake_case_ : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Union[str, Any] = nn.Dropout(p=_lowercase )
snake_case_ : Tuple = nn.ModuleList()
for lyr_num in range(_lowercase ):
# FiLM conditional T5 decoder
snake_case_ : Union[str, Any] = DecoderLayer(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase )
self.decoders.append(_lowercase )
snake_case_ : List[Any] = TaLayerNorm(_lowercase )
snake_case_ : Optional[Any] = nn.Dropout(p=_lowercase )
snake_case_ : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
snake_case_ : Optional[int] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
snake_case_ : int = self.conditioning_emb(_lowercase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
snake_case_ : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
snake_case_ : Dict = torch.broadcast_to(
torch.arange(_lowercase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
snake_case_ : Tuple = self.position_encoding(_lowercase )
snake_case_ : Optional[Any] = self.continuous_inputs_projection(_lowercase )
inputs += position_encodings
snake_case_ : List[Any] = self.dropout(_lowercase )
# decoder: No padding present.
snake_case_ : Tuple = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
snake_case_ : int = [(x, self.encoder_decoder_mask(_lowercase , _lowercase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
snake_case_ : Optional[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
snake_case_ : str = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
snake_case_ : int = lyr(
_lowercase , conditioning_emb=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )[0]
snake_case_ : int = self.decoder_norm(_lowercase )
snake_case_ : Union[str, Any] = self.post_dropout(_lowercase )
snake_case_ : int = self.spec_out(_lowercase )
return spec_out
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=1E-6 ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
snake_case_ : Any = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase ) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = self.layer[0](
_lowercase , conditioning_emb=_lowercase , attention_mask=_lowercase , )
if encoder_hidden_states is not None:
snake_case_ : Tuple = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
snake_case_ : str = self.layer[1](
_lowercase , key_value_states=_lowercase , attention_mask=_lowercase , )
# Apply Film Conditional Feed Forward layer
snake_case_ : Any = self.layer[-1](_lowercase , _lowercase )
return (hidden_states,)
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : Any = TaLayerNorm(_lowercase )
snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase )
snake_case_ : Union[str, Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase )
snake_case_ : List[Any] = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = self.layer_norm(_lowercase )
if conditioning_emb is not None:
snake_case_ : str = self.FiLMLayer(_lowercase , _lowercase )
# Self-attention block
snake_case_ : List[Any] = self.attention(_lowercase )
snake_case_ : List[str] = hidden_states + self.dropout(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : List[Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase )
snake_case_ : Union[str, Any] = TaLayerNorm(_lowercase , eps=_lowercase )
snake_case_ : Optional[Any] = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.layer_norm(_lowercase )
snake_case_ : Optional[Any] = self.attention(
_lowercase , encoder_hidden_states=_lowercase , attention_mask=attention_mask.squeeze(1 ) , )
snake_case_ : Any = hidden_states + self.dropout(_lowercase )
return layer_output
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = TaDenseGatedActDense(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase )
snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase )
snake_case_ : Optional[int] = TaLayerNorm(_lowercase , eps=_lowercase )
snake_case_ : Tuple = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = self.layer_norm(_lowercase )
if conditioning_emb is not None:
snake_case_ : Optional[int] = self.film(_lowercase , _lowercase )
snake_case_ : int = self.DenseReluDense(_lowercase )
snake_case_ : Optional[Any] = hidden_states + self.dropout(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[int] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Optional[int] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Any = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : int = nn.Dropout(_lowercase )
snake_case_ : Optional[int] = NewGELUActivation()
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : str = self.act(self.wi_a(_lowercase ) )
snake_case_ : Dict = self.wi_a(_lowercase )
snake_case_ : Any = hidden_gelu * hidden_linear
snake_case_ : List[Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.wo(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1E-6 ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : Union[str, Any] = nn.Parameter(torch.ones(_lowercase ) )
snake_case_ : int = eps
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=_lowercase )
snake_case_ : Any = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
snake_case_ : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def UpperCAmelCase__ ( self , _lowercase ) -> torch.Tensor:
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(_lowercase , 3.0 )) ))
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
super().__init__()
snake_case_ : List[Any] = nn.Linear(_lowercase , out_features * 2 , bias=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.scale_bias(_lowercase )
snake_case_ , snake_case_ : Any = torch.chunk(_lowercase , 2 , -1 )
snake_case_ : Optional[Any] = x * (1 + scale) + shift
return x
| 58 | 1 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    '''simple docstring'''
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main() -> None:
    '''simple docstring'''
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f'Mean Absolute Error : {mean_absolute_error(y_test, predictions)}')
    print(f'Mean Square Error : {mean_squared_error(y_test, predictions)}')
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
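# Usage sketch for the helper alone (toy arrays, purely illustrative):
# import numpy as np
# preds = xgboost(np.random.rand(20, 8), np.random.rand(20), np.random.rand(5, 8))
# preds.shape  # -> (5, 1)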
| 58 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''roformer'''
def __init__( self , _lowercase=5_0_0_0_0 , _lowercase=None , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1_5_3_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase=0 , _lowercase=False , _lowercase=True , **_lowercase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , **_lowercase )
snake_case_ : str = vocab_size
snake_case_ : Any = hidden_size if embedding_size is None else embedding_size
snake_case_ : List[str] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : Tuple = initializer_range
snake_case_ : str = layer_norm_eps
snake_case_ : List[str] = rotary_value
snake_case_ : str = use_cache
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : Any = {0: """batch""", 1: """sequence"""}
snake_case_ : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 58 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : List[Any] = batch_size
snake_case_ : str = seq_length
snake_case_ : List[str] = is_training
snake_case_ : Optional[int] = use_attention_mask
snake_case_ : str = use_token_type_ids
snake_case_ : List[str] = use_labels
snake_case_ : Union[str, Any] = vocab_size
snake_case_ : List[Any] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : Optional[int] = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : List[str] = attention_probs_dropout_prob
snake_case_ : Tuple = max_position_embeddings
snake_case_ : int = type_vocab_size
snake_case_ : Optional[Any] = type_sequence_label_size
snake_case_ : Optional[Any] = initializer_range
snake_case_ : List[str] = num_choices
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Tuple = None
if self.use_token_type_ids:
snake_case_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Any = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : Any = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[int] = config_and_inputs
snake_case_ : Union[str, Any] = True
snake_case_ : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : str = FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Optional[int] = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : str = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : Dict = model(_lowercase )[0]
snake_case_ : Optional[Any] = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , _lowercase )
# compare the actual values for a slice.
snake_case_ : Tuple = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[int] = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Union[str, Any] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : Dict = model(_lowercase )[0]
# compare the actual values for a slice.
snake_case_ : List[str] = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 58 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Dict = checkpoints.load_tax_checkpoint(__UpperCamelCase )
snake_case_ : Tuple = flatten_dict(__UpperCamelCase )
return flax_params
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = {}
snake_case_ : List[Any] = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
snake_case_ : Optional[Any] = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
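    # Note added for clarity (commentary only, not part of the original script): the two tables above
    # map T5X/Flax parameter names onto the Hugging Face Pix2Struct module names. The loop below
    # applies the generic renames first, then the decoder-specific ones, rewrites "layers_<n>" as
    # "layer.<n>", and nests encoder keys under "encoder.encoder".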
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
snake_case_ : List[Any] = """.""".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
snake_case_ : List[str] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
snake_case_ : Optional[int] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
snake_case_ : Optional[Any] = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , __UpperCamelCase )
snake_case_ : Union[str, Any] = new_key.replace("""encoder""" , """encoder.encoder""" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
snake_case_ : int = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , __UpperCamelCase )
snake_case_ : Dict = flax_dict[key]
snake_case_ : Tuple = {}
# convert converted_dict into torch format
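    # Added note (an assumption based on standard Flax/PyTorch conventions, not stated in the
    # original): dense kernels are transposed with .T because Flax stores them as
    # (in_features, out_features) while torch.nn.Linear expects (out_features, in_features);
    # embedding tables ("embed_tokens"/"embedder") keep their layout, hence the exception below.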
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
snake_case_ : Optional[int] = torch.from_numpy(converted_dict[key].T )
else:
snake_case_ : List[Any] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : List[str]=False ):
'''simple docstring'''
snake_case_ : Optional[int] = get_flax_param(__UpperCamelCase )
if not use_large:
snake_case_ : Optional[int] = PixaStructVisionConfig()
snake_case_ : Optional[Any] = PixaStructTextConfig()
else:
snake_case_ : Tuple = PixaStructVisionConfig(
hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_attention_heads=2_4 , num_hidden_layers=1_8 )
snake_case_ : List[str] = PixaStructTextConfig(hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_heads=2_4 , num_layers=1_8 )
snake_case_ : str = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__UpperCamelCase )
snake_case_ : Optional[int] = PixaStructForConditionalGeneration(__UpperCamelCase )
snake_case_ : str = rename_and_convert_flax_params(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" )
snake_case_ : int = PixaStructImageProcessor()
snake_case_ : str = PixaStructProcessor(image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase )
if use_large:
snake_case_ : Optional[Any] = 4_0_9_6
snake_case_ : int = True
# mkdir if needed
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
print("""Model saved in {}""".format(__UpperCamelCase ) )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
    parser.add_argument('''--is_vqa''', action='''store_true''', help='''Whether the checkpoint is a VQA (visual question answering) variant.''')
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 58 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int = 1_0 ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or n < 0:
raise ValueError("""Invalid input""" )
snake_case_ : Dict = 1_0**n
snake_case_ : List[str] = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , __UpperCamelCase )) + 1
return str(number % modulus )
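# Added sanity check (illustrative only, not part of the original solution): for a small exponent,
# the modular-exponentiation path used above must agree with the last ten digits of the full
# big-integer value 28433 * 2**20 + 1.
assert (28433 * pow(2, 20, 10**10) + 1) % 10**10 == int(str(28433 * 2**20 + 1)[-10:])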
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(10) = }''')
| 58 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float ):
'''simple docstring'''
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(__UpperCamelCase ) * abs(__UpperCamelCase )
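# Added worked example (commentary only, not in the original): a 10 kg body moving at 5 m/s
# carries 0.5 * 10 * 5 * 5 = 125 J; the two abs() calls square the speed, so the sign of the
# velocity does not affect the result.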
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 58 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = None
_lowerCamelCase = None
__lowerCAmelCase : Dict = namedtuple('''CoinsDistribResult''', '''moves excess''')
def __lowerCAmelCase ( __UpperCamelCase : TreeNode | None ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(__UpperCamelCase : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(__UpperCamelCase : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(__UpperCamelCase ) != count_coins(__UpperCamelCase ):
raise ValueError("""The nodes number should be same as the number of coins""" )
# Main calculation
def get_distrib(__UpperCamelCase : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
snake_case_ , snake_case_ : List[str] = get_distrib(node.left )
snake_case_ , snake_case_ : Any = get_distrib(node.right )
snake_case_ : List[str] = 1 - left_distrib_excess
snake_case_ : Optional[Any] = 1 - right_distrib_excess
snake_case_ : int = (
left_distrib_moves
+ right_distrib_moves
+ abs(__UpperCamelCase )
+ abs(__UpperCamelCase )
)
snake_case_ : Dict = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(__UpperCamelCase , __UpperCamelCase )
return get_distrib(__UpperCamelCase )[0]
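# Added worked example (commentary only, not part of the original): for a root holding 3 coins
# with two empty leaf children, each leaf returns an excess of 0, so the root must push one coin
# across each edge (coins_to_left = coins_to_right = 1), giving the expected answer of 2 moves.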
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableDiffusionInpaintPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCamelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCamelCase = frozenset([] )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=_lowercase , )
snake_case_ : Dict = PNDMScheduler(skip_prk_steps=_lowercase )
torch.manual_seed(0 )
snake_case_ : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
snake_case_ : Dict = CLIPTextModel(_lowercase )
snake_case_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self , _lowercase , _lowercase=0 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowercase ) ).to(_lowercase )
snake_case_ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Tuple = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((6_4, 6_4) )
snake_case_ : Any = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
if str(_lowercase ).startswith("""mps""" ):
snake_case_ : str = torch.manual_seed(_lowercase )
else:
snake_case_ : List[str] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
snake_case_ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ : List[str] = self.get_dummy_components()
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline(**_lowercase )
snake_case_ : Dict = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Optional[int] = self.get_dummy_inputs(_lowercase )
snake_case_ : List[str] = sd_pipe(**_lowercase ).images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ : Optional[int] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[str] = torch.manual_seed(0 )
snake_case_ : Dict = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , output_type="""np""" , )
snake_case_ : Tuple = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowercase , torch_dtype=torch.floataa , safety_checker=_lowercase , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case_ : Union[str, Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : Optional[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , output_type="""np""" , )
snake_case_ : str = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : List[str] = PNDMScheduler.from_pretrained(_lowercase , subfolder="""scheduler""" )
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowercase , safety_checker=_lowercase , scheduler=_lowercase , torch_dtype=torch.floataa , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case_ : Any = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
snake_case_ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
| 58 | 1 |
"""simple docstring"""
import requests
__lowerCAmelCase : int = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Dict = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["""articles"""] , 1 ):
print(F'{i}.) {article["title"]}' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
| 58 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Optional[Any] = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
snake_case_ : Optional[int] = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
snake_case_ : Optional[Any] = F'{src_lang}-{tgt_lang}'
snake_case_ : Dict = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
snake_case_ : List[str] = os.path.join(__UpperCamelCase , """README.md""" )
print(F'Generating {path}' )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
# make sure we are under the root of the project
__lowerCAmelCase : str = Path(__file__).resolve().parent.parent.parent
__lowerCAmelCase : Optional[int] = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = model_name.split('''-''')
__lowerCAmelCase : Optional[int] = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 58 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 58 |
"""simple docstring"""
__lowerCAmelCase : Tuple = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowerCAmelCase : Tuple = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowerCAmelCase : Any = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58 | 1 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__lowerCAmelCase : List[Any] = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
__lowerCAmelCase : Optional[int] = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Any = SavedModel()
snake_case_ : str = []
with open(os.path.join(__UpperCamelCase , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f:
snake_case_ : str = json.load(__UpperCamelCase )["""opsets"""]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__UpperCamelCase )] )
with open(__UpperCamelCase , """rb""" ) as f:
saved_model.ParseFromString(f.read() )
snake_case_ : Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
snake_case_ : Optional[Any] = sorted(__UpperCamelCase )
snake_case_ : Any = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__UpperCamelCase )
if strict and len(__UpperCamelCase ) > 0:
        raise Exception(F'Found the following incompatible ops for the opset {opset}:\n' + """\n""".join(incompatible_ops ) )
elif len(__UpperCamelCase ) > 0:
print(F'Found the following incompatible ops for the opset {opset}:' )
print(*__UpperCamelCase , sep="""\n""" )
else:
print(F'The saved model {saved_model_path} can properly be converted with ONNX.' )
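# Added usage sketch (the script path is an assumption, adjust it to wherever this file lives in
# the repository):
#   python utils/check_tf_ops.py --saved_model_path <path/to/saved_model_dir> --opset 12 --strict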
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
__lowerCAmelCase : Tuple = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 58 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
__lowerCAmelCase : Tuple = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__lowerCAmelCase : Union[str, Any] = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
__lowerCAmelCase : Optional[int] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
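# Added commentary (not part of the metric itself): the 0.5 in the example above follows from the
# formula in the description. The first pair contributes 1 substitution over 4 reference words
# ("prediction" vs "reference"); the second contributes 2 substitutions plus 1 insertion over 4
# reference words ("an other sample" vs "another one"); so WER = (1 + 3) / 8 = 0.5.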
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=False ) -> Optional[Any]:
'''simple docstring'''
if concatenate_texts:
return compute_measures(_lowercase , _lowercase )["wer"]
else:
snake_case_ : List[str] = 0
snake_case_ : Optional[int] = 0
for prediction, reference in zip(_lowercase , _lowercase ):
snake_case_ : Optional[Any] = compute_measures(_lowercase , _lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 58 | 1 |
"""simple docstring"""
import math
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float ):
'''simple docstring'''
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_6_0:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(__UpperCamelCase ) ) ** 2)
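# Added worked example (commentary only, not in the original): at an angle of 60 degrees,
# cos^2(60°) = 0.25, so an initial intensity of 100 units is attenuated to 25 units.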
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 58 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=3 , _lowercase=2_2_4 , _lowercase=3_0 , _lowercase=4_0_0 , _lowercase=True , _lowercase=None , _lowercase=True , _lowercase=[0.5, 0.5, 0.5] , _lowercase=[0.5, 0.5, 0.5] , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case_ : Union[str, Any] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Dict = num_channels
snake_case_ : Optional[Any] = image_size
snake_case_ : Optional[Any] = min_resolution
snake_case_ : List[Any] = max_resolution
snake_case_ : Union[str, Any] = do_resize
snake_case_ : Optional[int] = size
snake_case_ : Optional[Any] = do_normalize
snake_case_ : int = image_mean
snake_case_ : Dict = image_std
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = ViTImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = EfficientFormerImageProcessorTester(self )
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """image_mean""" ) )
self.assertTrue(hasattr(_lowercase , """image_std""" ) )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Optional[Any] = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
snake_case_ : int = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : int = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
snake_case_ : Dict = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Tuple = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 58 | 1 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : str = RobertaEmbeddings(_lowercase )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Optional[Any] = config.num_labels
snake_case_ : Dict = config.num_hidden_layers
snake_case_ : str = DeeRobertaModel(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : List[str] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = self.num_layers
try:
snake_case_ : int = self.roberta(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
snake_case_ : str = outputs[1]
snake_case_ : Union[str, Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : List[Any] = e.message
snake_case_ : Union[str, Any] = e.exit_layer
snake_case_ : Dict = outputs[0]
if not self.training:
snake_case_ : Dict = entropy(_lowercase )
snake_case_ : Optional[int] = []
snake_case_ : Union[str, Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Dict = MSELoss()
snake_case_ : Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Union[str, Any] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : int = []
for highway_exit in outputs[-1]:
snake_case_ : Tuple = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Optional[int] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : List[str] = (loss,) + outputs
if not self.training:
snake_case_ : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : Tuple = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 58 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCAmelCase : int = TypeVar('''KT''')
__lowerCAmelCase : Union[str, Any] = TypeVar('''VT''')
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = "root" , _lowercase = None ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = key
snake_case_ : Tuple = value
snake_case_ : list[Node[KT, VT]] = []
def __repr__( self ) -> str:
'''simple docstring'''
return f'Node({self.key}: {self.value})'
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.forward )
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = 0.5 , _lowercase = 1_6 ) -> int:
'''simple docstring'''
snake_case_ : Node[KT, VT] = Node[KT, VT]()
snake_case_ : Union[str, Any] = 0
snake_case_ : Optional[int] = p
snake_case_ : Any = max_level
def __str__( self ) -> str:
'''simple docstring'''
snake_case_ : str = list(self )
if len(_lowercase ) == 0:
return f'SkipList(level={self.level})'
snake_case_ : List[Any] = max((len(str(_lowercase ) ) for item in items) , default=4 )
snake_case_ : str = max(_lowercase , 4 ) + 4
snake_case_ : Union[str, Any] = self.head
snake_case_ : Dict = []
snake_case_ : List[str] = node.forward.copy()
lines.append(f'[{node.key}]'.ljust(_lowercase , """-""" ) + """* """ * len(_lowercase ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
while len(node.forward ) != 0:
snake_case_ : Optional[Any] = node.forward[0]
lines.append(
f'[{node.key}]'.ljust(_lowercase , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
snake_case_ : List[str] = node.forward
lines.append("""None""".ljust(_lowercase ) + """* """ * len(_lowercase ) )
return f'SkipList(level={self.level})\n' + "\n".join(_lowercase )
def __iter__( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
snake_case_ : Dict = node.forward[0]
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = 1
while random() < self.p and level < self.max_level:
level += 1
return level
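    # Added note (standard skip-list analysis, not stated in the original): each extra level is kept
    # with probability p, so node heights follow a geometric distribution and the expected height of
    # a list with n keys is roughly log base (1/p) of n (about log2(n) for the default p = 0.5).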
def UpperCAmelCase__ ( self , _lowercase ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
snake_case_ : List[Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_lowercase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
for i, update_node in enumerate(_lowercase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
snake_case_ : List[str] = node.forward[i]
else:
snake_case_ : Tuple = update_node.forward[:i]
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
snake_case_ : List[Any] = value
else:
snake_case_ : Optional[int] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , _lowercase ):
update_vector.append(self.head )
snake_case_ : Any = level
snake_case_ : Optional[int] = Node(_lowercase , _lowercase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(_lowercase )
else:
snake_case_ : Optional[Any] = new_node
def UpperCAmelCase__ ( self , _lowercase ) -> VT | None:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
return node.value
return None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 1_2 )
skip_list.insert("""Key3""" , 4_1 )
skip_list.insert("""Key4""" , -1_9 )
snake_case_ : Optional[int] = skip_list.head
snake_case_ : List[Any] = {}
while node.level != 0:
snake_case_ : List[str] = node.forward[0]
snake_case_ : Union[str, Any] = node.value
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_0 )
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 1_0 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 1_0 )
snake_case_ : str = skip_list.head
snake_case_ : str = {}
while node.level != 0:
snake_case_ : Optional[Any] = node.forward[0]
snake_case_ : int = node.value
if len(__UpperCamelCase ) != 4:
print()
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : str = SkipList()
assert skip_list.find("""Some key""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = SkipList()
skip_list.insert("""Key2""" , 2_0 )
assert skip_list.find("""Key2""" ) == 2_0
skip_list.insert("""Some Key""" , 1_0 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 1_3 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 1_0
assert skip_list.find("""V""" ) == 1_3
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Tuple = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 1_4
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4_2 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""X""" )
def traverse_keys(__UpperCamelCase : str ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(__UpperCamelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __lowerCAmelCase ( ):
'''simple docstring'''
def is_sorted(__UpperCamelCase : List[Any] ):
return all(next_item >= item for item, next_item in zip(__UpperCamelCase , lst[1:] ) )
snake_case_ : str = SkipList()
for i in range(1_0 ):
skip_list.insert(__UpperCamelCase , __UpperCamelCase )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
assert is_sorted(list(__UpperCamelCase ) )
def __lowerCAmelCase ( ):
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 58 | 1 |
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
__lowerCAmelCase : str = '''src/transformers'''
# Matches is_xxx_available()
__lowerCAmelCase : Dict = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
__lowerCAmelCase : Dict = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__lowerCAmelCase : List[str] = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
__lowerCAmelCase : List[str] = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
__lowerCAmelCase : int = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__lowerCAmelCase : Tuple = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
__lowerCAmelCase : Dict = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
__lowerCAmelCase : Union[str, Any] = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
__lowerCAmelCase : int = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
__lowerCAmelCase : Optional[int] = re.compile(R'''^\s*try:''')
# Catches a line with else:
__lowerCAmelCase : Dict = re.compile(R'''^\s*else:''')
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
if _re_test_backend.search(__UpperCamelCase ) is None:
return None
snake_case_ : Union[str, Any] = [b[0] for b in _re_backend.findall(__UpperCamelCase )]
backends.sort()
return "_and_".join(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : int = f.readlines()
snake_case_ : Union[str, Any] = 0
while line_index < len(__UpperCamelCase ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__UpperCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
snake_case_ : Optional[Any] = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
snake_case_ : Any = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__UpperCamelCase ):
snake_case_ : Dict = _re_one_line_import_struct.search(__UpperCamelCase ).groups()[0]
snake_case_ : Optional[Any] = re.findall(r"""\[([^\]]+)\]""" , __UpperCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
snake_case_ : int = _re_import_struct_key_value.search(__UpperCamelCase )
if single_line_import_search is not None:
snake_case_ : Tuple = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(__UpperCamelCase ) > 0]
objects.extend(__UpperCamelCase )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
snake_case_ : int = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
snake_case_ : Optional[int] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
snake_case_ : Optional[int] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
snake_case_ : Optional[int] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
snake_case_ : Tuple = lines[line_index]
if _re_import_struct_add_one.search(__UpperCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(__UpperCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(__UpperCamelCase ) is not None:
snake_case_ : int = _re_import_struct_add_many.search(__UpperCamelCase ).groups()[0].split(""", """ )
snake_case_ : Optional[int] = [obj[1:-1] for obj in imports if len(__UpperCamelCase ) > 0]
objects.extend(__UpperCamelCase )
elif _re_between_brackets.search(__UpperCamelCase ) is not None:
snake_case_ : Optional[Any] = _re_between_brackets.search(__UpperCamelCase ).groups()[0].split(""", """ )
snake_case_ : Any = [obj[1:-1] for obj in imports if len(__UpperCamelCase ) > 0]
objects.extend(__UpperCamelCase )
elif _re_quote_object.search(__UpperCamelCase ) is not None:
objects.append(_re_quote_object.search(__UpperCamelCase ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 1_2 + """\"""" ):
objects.append(line[1_3:-3] )
line_index += 1
snake_case_ : List[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
snake_case_ : Any = []
while (
line_index < len(__UpperCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
snake_case_ : List[str] = lines[line_index]
snake_case_ : Optional[int] = _re_import.search(__UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
snake_case_ : Optional[Any] = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(__UpperCamelCase ):
# If the line is an "if is_backend_available" block, we grab all the associated objects.
snake_case_ : Dict = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
snake_case_ : int = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
snake_case_ : Optional[int] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
snake_case_ : int = lines[line_index]
snake_case_ : List[str] = _re_import.search(__UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 1_2 ):
objects.append(line[1_2:-2] )
line_index += 1
snake_case_ : int = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
def find_duplicates(__UpperCamelCase : Any ):
return [k for k, v in collections.Counter(__UpperCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
snake_case_ : List[str] = []
for key in import_dict_objects.keys():
snake_case_ : Tuple = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
snake_case_ : int = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
snake_case_ : int = """base imports""" if key == """none""" else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = []
for root, _, files in os.walk(__UpperCamelCase ):
if "__init__.py" in files:
snake_case_ : Tuple = os.path.join(__UpperCamelCase , """__init__.py""" )
snake_case_ : List[Any] = parse_init(__UpperCamelCase )
if objects is not None:
snake_case_ : Tuple = analyze_results(*__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
snake_case_ : List[str] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append("""\n""".join(__UpperCamelCase ) )
if len(__UpperCamelCase ) > 0:
raise ValueError("""\n\n""".join(__UpperCamelCase ) )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Tuple = []
for path, directories, files in os.walk(__UpperCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(__UpperCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__UpperCamelCase ) / folder).glob("""*.py""" ) ) ) == 0:
continue
snake_case_ : List[str] = str((Path(__UpperCamelCase ) / folder).relative_to(__UpperCamelCase ) )
snake_case_ : Optional[Any] = short_path.replace(os.path.sep , """.""" )
submodules.append(__UpperCamelCase )
for fname in files:
if fname == "__init__.py":
continue
snake_case_ : List[Any] = str((Path(__UpperCamelCase ) / fname).relative_to(__UpperCamelCase ) )
snake_case_ : Union[str, Any] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(__UpperCamelCase )
return submodules
__lowerCAmelCase : Tuple = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def __lowerCAmelCase ( ):
'''simple docstring'''
from transformers.utils import direct_transformers_import
snake_case_ : Tuple = direct_transformers_import(__UpperCamelCase )
snake_case_ : Dict = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
# (potentially re-)add them.
with open(os.path.join(__UpperCamelCase , """__init__.py""" ) , """r""" ) as f:
snake_case_ : Optional[int] = f.read()
import_structure_keys.update(set(re.findall(r"""import_structure\[\"([^\"]*)\"\]""" , __UpperCamelCase ) ) )
snake_case_ : Dict = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(__UpperCamelCase ) > 0:
snake_case_ : Optional[Any] = """\n""".join(F'- {module}' for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registed in the main init of Transformers:\n"""
F'{list_of_modules}\n'
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 58 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[Any] = '''examples/'''
__lowerCAmelCase : Union[str, Any] = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__lowerCAmelCase : Union[str, Any] = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
__lowerCAmelCase : List[Any] = '''README.md'''
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict ):
'''simple docstring'''
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : Any = f.read()
snake_case_ , snake_case_ : Optional[int] = REPLACE_PATTERNS[pattern]
snake_case_ : Union[str, Any] = replace.replace("""VERSION""" , __UpperCamelCase )
snake_case_ : List[Any] = re_pattern.sub(__UpperCamelCase , __UpperCamelCase )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(__UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , pattern="""examples""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : int=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not patch:
update_version_in_examples(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = """🤗 Transformers currently provides the following architectures"""
snake_case_ : Union[str, Any] = """1. Want to contribute a new model?"""
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : str = f.readlines()
# Find the start of the list.
snake_case_ : List[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
snake_case_ : Optional[int] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
snake_case_ : Any = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
snake_case_ : Any = f.read()
snake_case_ : Tuple = REPLACE_PATTERNS["""init"""][0].search(__UpperCamelCase ).groups()[0]
return packaging.version.parse(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str=False ):
'''simple docstring'''
snake_case_ : Union[str, Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
snake_case_ : str = default_version.base_version
elif patch:
snake_case_ : str = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
snake_case_ : str = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
snake_case_ : int = input(F'Which version are you releasing? [{default_version}]' )
if len(__UpperCamelCase ) == 0:
snake_case_ : Optional[int] = default_version
print(F'Updating version to {version}.' )
global_version_update(__UpperCamelCase , patch=__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = get_version()
snake_case_ : str = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
snake_case_ : Tuple = current_version.base_version
# Check with the user we got that right.
snake_case_ : Optional[int] = input(F'Which version are we developing now? [{dev_version}]' )
if len(__UpperCamelCase ) == 0:
snake_case_ : Dict = dev_version
print(F'Updating version to {version}.' )
global_version_update(__UpperCamelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__lowerCAmelCase : str = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
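# Hedged usage sketch (added; the script's real file name is not shown here):
#     python release.py                 # prepare the next release version (drop .dev0 / bump the minor)
#     python release.py --patch         # bump only the micro version for a patch release
#     python release.py --post_release  # move the codebase back to a ".dev0" version afterwards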
| 58 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float ):
'''simple docstring'''
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
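# Added usage sketch (hedged; the de-obfuscated function name is assumed to be
# `speed_of_sound_in_a_fluid`): for water, density ~= 998 kg/m^3 and
# bulk_modulus ~= 2.15e9 Pa give (2.15e9 / 998) ** 0.5 ~= 1.47e3 m/s, matching the
# Newton-Laplace relation c = sqrt(K / rho) that the function implements.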
| 58 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float ):
'''simple docstring'''
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58 | 1 |
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class _lowerCAmelCase :
"""simple docstring"""
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError()
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
raise NotImplementedError()
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase = False , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = tokenizer
snake_case_ : Union[str, Any] = skip_prompt
snake_case_ : List[str] = decode_kwargs
# variables used in the streaming process
snake_case_ : Union[str, Any] = []
snake_case_ : Tuple = 0
snake_case_ : str = True
def UpperCAmelCase__ ( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("""TextStreamer only supports batch size 1""" )
elif len(value.shape ) > 1:
snake_case_ : Dict = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
snake_case_ : Union[str, Any] = False
return
# Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
snake_case_ : Optional[Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("""\n""" ):
snake_case_ : List[Any] = text[self.print_len :]
snake_case_ : Optional[int] = []
snake_case_ : Dict = 0
# If the last token is a CJK character, we print the characters.
elif len(_lowercase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
snake_case_ : List[str] = text[self.print_len :]
self.print_len += len(_lowercase )
# Otherwise, print up to the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
snake_case_ : Union[str, Any] = text[self.print_len : text.rfind(""" """ ) + 1]
self.print_len += len(_lowercase )
self.on_finalized_text(_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
if len(self.token_cache ) > 0:
snake_case_ : Tuple = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
snake_case_ : int = text[self.print_len :]
snake_case_ : str = []
snake_case_ : str = 0
else:
snake_case_ : Optional[Any] = """"""
snake_case_ : Tuple = True
self.on_finalized_text(_lowercase , stream_end=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase = False ) -> List[str]:
'''simple docstring'''
print(_lowercase , flush=_lowercase , end="""""" if not stream_end else None )
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
if (
(cp >= 0X4e_00 and cp <= 0X9f_ff)
or (cp >= 0X34_00 and cp <= 0X4d_bf) #
or (cp >= 0X2_00_00 and cp <= 0X2_a6_df) #
or (cp >= 0X2_a7_00 and cp <= 0X2_b7_3f) #
or (cp >= 0X2_b7_40 and cp <= 0X2_b8_1f) #
or (cp >= 0X2_b8_20 and cp <= 0X2_ce_af) #
or (cp >= 0Xf9_00 and cp <= 0Xfa_ff)
or (cp >= 0X2_f8_00 and cp <= 0X2_fa_1f) #
): #
return True
return False
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase = False , _lowercase = None , **_lowercase ) -> List[str]:
'''simple docstring'''
super().__init__(_lowercase , _lowercase , **_lowercase )
snake_case_ : str = Queue()
snake_case_ : int = None
snake_case_ : Union[str, Any] = timeout
def UpperCAmelCase__ ( self , _lowercase , _lowercase = False ) -> List[Any]:
'''simple docstring'''
self.text_queue.put(_lowercase , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ) -> List[Any]:
'''simple docstring'''
return self
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
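# Added usage sketch (hedged; `TextIteratorStreamer`, `tokenizer` and `model` below are assumed,
# de-obfuscated names and are not defined in this file):
#
#     from threading import Thread
#     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#     thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer))
#     thread.start()
#     for new_text in streamer:  # consumes the queue via __iter__/__next__ defined above
#         print(new_text, end="")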
| 58 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
snake_case_ : str = precision
snake_case_ : Any = ceil(precision / 1_4 )
snake_case_ : Dict = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
snake_case_ : Optional[Any] = 1
snake_case_ : List[str] = 1_3_5_9_1_4_0_9
snake_case_ : Optional[int] = Decimal(__UpperCamelCase )
for k in range(1 , __UpperCamelCase ):
snake_case_ : Any = factorial(6 * k ) // (factorial(3 * k ) * factorial(__UpperCamelCase ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__lowerCAmelCase : int = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
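# Added note (hedged): the loop above follows the Chudnovsky series
#     1/pi = 12 * sum_{k>=0} (-1)^k (6k)! (13591409 + 545140134*k)
#            / ((3k)! * (k!)**3 * 640320**(3*k + 3/2)),
# whose constant factor 640320**(3/2) / 12 equals the 426880 * sqrt(10005) used above.
# Every term contributes roughly 14 correct digits, hence the `ceil(precision / 14)` iteration count.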
| 58 | 1 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 58 |
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Any = torch.exp(__UpperCamelCase )
snake_case_ : Optional[int] = torch.sum(__UpperCamelCase , dim=1 ) # sum of exp(x_i)
snake_case_ : str = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(__UpperCamelCase ) - B / A
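# Derivation note (added): with A = sum_i exp(x_i) and B = sum_i x_i * exp(x_i), the softmax
# probabilities are p_i = exp(x_i) / A, so the Shannon entropy is
#     H = -sum_i p_i * log(p_i) = log(A) - B / A,
# which is exactly the value returned above and is later compared against the early-exit
# entropy thresholds.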
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> int:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = config.output_attentions
snake_case_ : str = config.output_hidden_states
snake_case_ : List[str] = nn.ModuleList([BertLayer(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Tuple = nn.ModuleList([BertHighway(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Any = [-1 for _ in range(config.num_hidden_layers )]
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
if (type(_lowercase ) is float) or (type(_lowercase ) is int):
for i in range(len(self.early_exit_entropy ) ):
snake_case_ : Dict = x
else:
snake_case_ : Union[str, Any] = x
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Any:
'''simple docstring'''
snake_case_ : str = ()
snake_case_ : str = ()
snake_case_ : List[str] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
snake_case_ : int = all_hidden_states + (hidden_states,)
snake_case_ : Any = layer_module(
_lowercase , _lowercase , head_mask[i] , _lowercase , _lowercase )
snake_case_ : Dict = layer_outputs[0]
if self.output_attentions:
snake_case_ : str = all_attentions + (layer_outputs[1],)
snake_case_ : Optional[int] = (hidden_states,)
if self.output_hidden_states:
snake_case_ : Tuple = current_outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : int = current_outputs + (all_attentions,)
snake_case_ : Optional[Any] = self.highway[i](_lowercase )
# logits, pooled_output
if not self.training:
snake_case_ : Tuple = highway_exit[0]
snake_case_ : List[str] = entropy(_lowercase )
snake_case_ : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
snake_case_ : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
snake_case_ : List[Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_lowercase , i + 1 )
else:
snake_case_ : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
snake_case_ : Dict = all_hidden_states + (hidden_states,)
snake_case_ : str = (hidden_states,)
if self.output_hidden_states:
snake_case_ : List[Any] = outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : Union[str, Any] = outputs + (all_attentions,)
snake_case_ : List[str] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config
snake_case_ : int = BertEmbeddings(_lowercase )
snake_case_ : Tuple = DeeBertEncoder(_lowercase )
snake_case_ : int = BertPooler(_lowercase )
self.init_weights()
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return self.embeddings.word_embeddings
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = value
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_lowercase )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
snake_case_ : Dict = input_ids.size()
elif inputs_embeds is not None:
snake_case_ : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
snake_case_ : int = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case_ : Dict = torch.ones(_lowercase , device=_lowercase )
if encoder_attention_mask is None:
snake_case_ : Tuple = torch.ones(_lowercase , device=_lowercase )
if token_type_ids is None:
snake_case_ : Any = torch.zeros(_lowercase , dtype=torch.long , device=_lowercase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case_ : torch.Tensor = self.get_extended_attention_mask(_lowercase , _lowercase , _lowercase )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
snake_case_ : List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
snake_case_ : Any = encoder_attention_mask[:, None, None, :]
snake_case_ : List[str] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
snake_case_ : List[str] = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case_ : int = self.get_head_mask(_lowercase , self.config.num_hidden_layers )
snake_case_ : List[str] = self.embeddings(
input_ids=_lowercase , position_ids=_lowercase , token_type_ids=_lowercase , inputs_embeds=_lowercase )
snake_case_ : List[str] = self.encoder(
_lowercase , attention_mask=_lowercase , head_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )
snake_case_ : Optional[Any] = encoder_outputs[0]
snake_case_ : Union[str, Any] = self.pooler(_lowercase )
snake_case_ : Optional[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = message
snake_case_ : str = exit_layer # start from 1!
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : str = BertPooler(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Dict = nn.Linear(config.hidden_size , config.num_labels )
def UpperCAmelCase__ ( self , _lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = encoder_outputs[0]
snake_case_ : List[Any] = self.pooler(_lowercase )
# "return" pooler_output
# BertModel
snake_case_ : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
snake_case_ : Union[str, Any] = bmodel_output[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : List[str] = self.classifier(_lowercase )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config.num_labels
snake_case_ : Tuple = config.num_hidden_layers
snake_case_ : Any = DeeBertModel(_lowercase )
snake_case_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Tuple = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> int:
'''simple docstring'''
snake_case_ : int = self.num_layers
try:
snake_case_ : Any = self.bert(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
snake_case_ : str = outputs[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : Optional[int] = e.message
snake_case_ : Dict = e.exit_layer
snake_case_ : Optional[Any] = outputs[0]
if not self.training:
snake_case_ : int = entropy(_lowercase )
snake_case_ : int = []
snake_case_ : List[str] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : Dict = []
for highway_exit in outputs[-1]:
snake_case_ : List[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : List[Any] = MSELoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : str = (loss,) + outputs
if not self.training:
snake_case_ : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : str = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 58 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 1_2_8, """min_length""": 1_2, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_4_2, """min_length""": 5_6, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 6_2, """min_length""": 1_1, """num_beams""": 6},
}
}
snake_case_ : int = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 1_2_8,
"""task_specific_params.summarization.min_length""": 1_2,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 1_4_2,
"""task_specific_params.summarization_cnn.min_length""": 5_6,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 6_2,
"""task_specific_params.summarization_xsum.min_length""": 1_1,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(_lowercase ) , x.transpose() ) )
snake_case_ : str = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(_lowercase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : int = np.random.randn(3 , 4 )
snake_case_ : Union[str, Any] = torch.tensor(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase ) , transpose(_lowercase ).numpy() ) )
snake_case_ : Union[str, Any] = np.random.randn(3 , 4 , 5 )
snake_case_ : Any = torch.tensor(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase , axes=(1, 2, 0) ) , transpose(_lowercase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Dict = np.random.randn(3 , 4 )
snake_case_ : Union[str, Any] = tf.constant(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase ) , transpose(_lowercase ).numpy() ) )
snake_case_ : List[Any] = np.random.randn(3 , 4 , 5 )
snake_case_ : int = tf.constant(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase , axes=(1, 2, 0) ) , transpose(_lowercase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : str = np.random.randn(3 , 4 )
snake_case_ : Tuple = jnp.array(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase ) , np.asarray(transpose(_lowercase ) ) ) )
snake_case_ : Any = np.random.randn(3 , 4 , 5 )
snake_case_ : List[str] = jnp.array(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase , axes=(1, 2, 0) ) , np.asarray(transpose(_lowercase , axes=(1, 2, 0) ) ) ) )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(_lowercase , (4, 3) ) , np.reshape(_lowercase , (4, 3) ) ) )
snake_case_ : Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(_lowercase , (1_2, 5) ) , np.reshape(_lowercase , (1_2, 5) ) ) )
@require_torch
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = np.random.randn(3 , 4 )
snake_case_ : Optional[int] = torch.tensor(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (4, 3) ) , reshape(_lowercase , (4, 3) ).numpy() ) )
snake_case_ : Dict = np.random.randn(3 , 4 , 5 )
snake_case_ : Any = torch.tensor(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (1_2, 5) ) , reshape(_lowercase , (1_2, 5) ).numpy() ) )
@require_tf
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = np.random.randn(3 , 4 )
snake_case_ : Dict = tf.constant(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (4, 3) ) , reshape(_lowercase , (4, 3) ).numpy() ) )
snake_case_ : Optional[int] = np.random.randn(3 , 4 , 5 )
snake_case_ : Tuple = tf.constant(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (1_2, 5) ) , reshape(_lowercase , (1_2, 5) ).numpy() ) )
@require_flax
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = np.random.randn(3 , 4 )
snake_case_ : List[Any] = jnp.array(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (4, 3) ) , np.asarray(reshape(_lowercase , (4, 3) ) ) ) )
snake_case_ : Optional[int] = np.random.randn(3 , 4 , 5 )
snake_case_ : List[str] = jnp.array(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (1_2, 5) ) , np.asarray(reshape(_lowercase , (1_2, 5) ) ) ) )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(_lowercase ) , np.squeeze(_lowercase ) ) )
snake_case_ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(_lowercase , axis=2 ) , np.squeeze(_lowercase , axis=2 ) ) )
@require_torch
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = np.random.randn(1 , 3 , 4 )
snake_case_ : Dict = torch.tensor(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase ) , squeeze(_lowercase ).numpy() ) )
snake_case_ : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
snake_case_ : Optional[int] = torch.tensor(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase , axis=2 ) , squeeze(_lowercase , axis=2 ).numpy() ) )
@require_tf
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = np.random.randn(1 , 3 , 4 )
snake_case_ : Optional[Any] = tf.constant(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase ) , squeeze(_lowercase ).numpy() ) )
snake_case_ : Tuple = np.random.randn(1 , 4 , 1 , 5 )
snake_case_ : int = tf.constant(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase , axis=2 ) , squeeze(_lowercase , axis=2 ).numpy() ) )
@require_flax
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = np.random.randn(1 , 3 , 4 )
snake_case_ : Any = jnp.array(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase ) , np.asarray(squeeze(_lowercase ) ) ) )
snake_case_ : Tuple = np.random.randn(1 , 4 , 1 , 5 )
snake_case_ : Any = jnp.array(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase , axis=2 ) , np.asarray(squeeze(_lowercase , axis=2 ) ) ) )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , np.expand_dims(_lowercase , axis=1 ) ) )
@require_torch
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = np.random.randn(3 , 4 )
snake_case_ : List[Any] = torch.tensor(_lowercase )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , expand_dims(_lowercase , axis=1 ).numpy() ) )
@require_tf
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : str = np.random.randn(3 , 4 )
snake_case_ : List[Any] = tf.constant(_lowercase )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , expand_dims(_lowercase , axis=1 ).numpy() ) )
@require_flax
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : int = np.random.randn(3 , 4 )
snake_case_ : List[str] = jnp.array(_lowercase )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , np.asarray(expand_dims(_lowercase , axis=1 ) ) ) )
| 58 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int ):
'''simple docstring'''
return (-y * np.log(__UpperCamelCase ) - (1 - y) * np.log(1 - h )).mean()
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[int] = np.dot(__UpperCamelCase , __UpperCamelCase )
return np.sum(y * scores - np.log(1 + np.exp(__UpperCamelCase ) ) )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int=7_0_0_0_0 ):
'''simple docstring'''
snake_case_ : Dict = np.zeros(x.shape[1] )
for iterations in range(__UpperCamelCase ):
snake_case_ : Any = np.dot(__UpperCamelCase , __UpperCamelCase )
snake_case_ : List[str] = sigmoid_function(__UpperCamelCase )
snake_case_ : Optional[Any] = np.dot(x.T , h - y ) / y.size
snake_case_ : str = theta - alpha * gradient # updating the weights
snake_case_ : int = np.dot(__UpperCamelCase , __UpperCamelCase )
snake_case_ : List[str] = sigmoid_function(__UpperCamelCase )
snake_case_ : Dict = cost_function(__UpperCamelCase , __UpperCamelCase )
if iterations % 1_0_0 == 0:
print(F'loss: {j} \t' ) # printing the loss after every 100 iterations
return theta
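# Added note (hedged arithmetic example): each iteration above is a plain batch
# gradient-descent step on the logistic loss,
#     theta <- theta - alpha * X.T @ (sigmoid(X @ theta) - y) / m.
# For a single sample x = 2.0 with theta = 0.5, y = 1 and alpha = 0.1:
#     h = sigmoid(1.0) ~= 0.731, gradient = 2.0 * (0.731 - 1) ~= -0.538,
#     so theta becomes 0.5 + 0.1 * 0.538 ~= 0.554.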
# In[68]:
if __name__ == "__main__":
__lowerCAmelCase : Any = datasets.load_iris()
__lowerCAmelCase : List[Any] = iris.data[:, :2]
__lowerCAmelCase : Tuple = (iris.target != 0) * 1
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : List[Any] = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
return sigmoid_function(
np.dot(__UpperCamelCase , __UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((__lowerCAmelCase) , (__lowerCAmelCase)) : Union[str, Any] = (x[:, 0].min(), x[:, 0].max())
((__lowerCAmelCase) , (__lowerCAmelCase)) : Tuple = (x[:, 1].min(), x[:, 1].max())
((__lowerCAmelCase) , (__lowerCAmelCase)) : Optional[Any] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
__lowerCAmelCase : Any = np.c_[xxa.ravel(), xxa.ravel()]
__lowerCAmelCase : Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 58 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : List[str] = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__lowerCAmelCase : Tuple = '''scheduler_config.json'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 1
_lowerCamelCase = 2
_lowerCamelCase = 3
_lowerCamelCase = 4
_lowerCamelCase = 5
@dataclass
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = SCHEDULER_CONFIG_NAME
_lowerCamelCase = ['''dtype''']
_lowerCamelCase = []
_lowerCamelCase = True
@classmethod
def UpperCAmelCase__ ( cls , _lowercase = None , _lowercase = None , _lowercase=False , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ : int = cls.load_config(
pretrained_model_name_or_path=_lowercase , subfolder=_lowercase , return_unused_kwargs=_lowercase , **_lowercase , )
snake_case_ , snake_case_ : Dict = cls.from_config(_lowercase , return_unused_kwargs=_lowercase , **_lowercase )
if hasattr(_lowercase , """create_state""" ) and getattr(_lowercase , """has_state""" , _lowercase ):
snake_case_ : Any = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase__ ( self , _lowercase , _lowercase = False , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
self.save_config(save_directory=_lowercase , push_to_hub=_lowercase , **_lowercase )
@property
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def UpperCAmelCase__ ( cls ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) )
snake_case_ : str = importlib.import_module(__name__.split(""".""" )[0] )
snake_case_ : Optional[int] = [
getattr(_lowercase , _lowercase ) for c in compatible_classes_str if hasattr(_lowercase , _lowercase )
]
return compatible_classes
def __lowerCAmelCase ( __UpperCamelCase : jnp.ndarray , __UpperCamelCase : Tuple[int] ):
'''simple docstring'''
assert len(__UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(__UpperCamelCase ) - x.ndim) ) , __UpperCamelCase )
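# Shape note (added): e.g. a per-timestep vector x of shape (batch,) and a target shape of
# (batch, 3, 64, 64) first become x.reshape(batch, 1, 1, 1) and are then broadcast to
# (batch, 3, 64, 64), so the per-timestep scalars can multiply image-shaped arrays elementwise.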
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Any=0.999 , __UpperCamelCase : Optional[int]=jnp.floataa ):
'''simple docstring'''
def alpha_bar(__UpperCamelCase : Optional[int] ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
snake_case_ : Optional[Any] = []
for i in range(__UpperCamelCase ):
snake_case_ : Dict = i / num_diffusion_timesteps
snake_case_ : Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(__UpperCamelCase ) / alpha_bar(__UpperCamelCase ) , __UpperCamelCase ) )
return jnp.array(__UpperCamelCase , dtype=__UpperCamelCase )
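# Added note (hedged): this implements the squared-cosine ("cosine") beta schedule, with
#     alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2
# and beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta (0.999 by
# default) so that no single diffusion step destroys too much signal.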
@flax.struct.dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
@classmethod
def UpperCAmelCase__ ( cls , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : Any = scheduler.config
if config.trained_betas is not None:
snake_case_ : Optional[Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
snake_case_ : int = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
snake_case_ : str = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
snake_case_ : int = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
snake_case_ : Optional[Any] = 1.0 - betas
snake_case_ : Any = jnp.cumprod(_lowercase , axis=0 )
return cls(
alphas=_lowercase , betas=_lowercase , alphas_cumprod=_lowercase , )
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ : Tuple = state.alphas_cumprod
snake_case_ : Optional[int] = alphas_cumprod[timesteps] ** 0.5
snake_case_ : Dict = sqrt_alpha_prod.flatten()
snake_case_ : int = broadcast_to_shape_from_left(__UpperCamelCase , original_samples.shape )
snake_case_ : Optional[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
snake_case_ : Dict = sqrt_one_minus_alpha_prod.flatten()
snake_case_ : Tuple = broadcast_to_shape_from_left(__UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ , snake_case_ : str = get_sqrt_alpha_prod(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Any = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
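# Added note: the helper above produces the closed-form forward-diffusion sample
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
# while the next helper (de-obfuscated name: `get_velocity`) returns the v-prediction target
#     v_t = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * x_0.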
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = get_sqrt_alpha_prod(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Any = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 58 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = IFPipeline
_lowerCamelCase = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
_lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return self._get_dummy_components()
def UpperCAmelCase__ ( self , _lowercase , _lowercase=0 ) -> List[str]:
'''simple docstring'''
if str(_lowercase ).startswith("""mps""" ):
snake_case_ : Optional[Any] = torch.manual_seed(_lowercase )
else:
snake_case_ : int = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
snake_case_ : str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
self._test_save_load_local()
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
snake_case_ : Optional[int] = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_lowercase , tokenizer=_lowercase )
# precompute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
snake_case_ , snake_case_ : int = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
snake_case_ : Union[str, Any] = None
snake_case_ : int = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_lowercase , _lowercase , _lowercase , _lowercase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
snake_case_ : int = IFImgaImgPipeline(**pipe_a.components )
snake_case_ : int = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_lowercase , _lowercase , _lowercase , _lowercase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
snake_case_ : int = IFInpaintingPipeline(**pipe_a.components )
snake_case_ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_lowercase , _lowercase , _lowercase , _lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
_start_torch_memory_measurement()
snake_case_ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case_ : int = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Dict = output.images[0]
assert image.shape == (6_4, 6_4, 3)
snake_case_ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
snake_case_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
# pipeline 2
_start_torch_memory_measurement()
snake_case_ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case_ : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_lowercase )
snake_case_ : str = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
snake_case_ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
snake_case_ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
_start_torch_memory_measurement()
snake_case_ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_lowercase )
snake_case_ : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case_ : Union[str, Any] = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type="""np""" , )
snake_case_ : List[str] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
snake_case_ : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
snake_case_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
# pipeline 2
_start_torch_memory_measurement()
snake_case_ : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case_ : Union[str, Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(_lowercase )
snake_case_ : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_lowercase )
snake_case_ : Union[str, Any] = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , original_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
snake_case_ : Optional[int] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
snake_case_ : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
snake_case_ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
        '''Stage I inpainting at 64x64 (image + mask), then stage II super-resolution to 256x256; checks output shapes, peak CUDA memory and the reference images.'''
_start_torch_memory_measurement()
snake_case_ : List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_lowercase )
snake_case_ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(_lowercase )
snake_case_ : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case_ : int = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , mask_image=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Any = output.images[0]
assert image.shape == (6_4, 6_4, 3)
snake_case_ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
snake_case_ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
# pipeline 2
_start_torch_memory_measurement()
snake_case_ : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case_ : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_lowercase )
snake_case_ : Dict = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(_lowercase )
snake_case_ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(_lowercase )
snake_case_ : int = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , mask_image=_lowercase , original_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
snake_case_ : Dict = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
snake_case_ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
snake_case_ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
def __lowerCAmelCase ( ):
    '''Clear the CUDA cache and reset the peak-memory counters so that torch.cuda.max_memory_allocated() reflects only the following pipeline run.'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
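# A minimal sketch of the memory-measurement pattern used by the tests above. The helper
# name `_start_torch_memory_measurement` is inferred from its call sites, and the pipeline
# call is illustrative only:
#
#     _start_torch_memory_measurement()                 # empty the cache, reset peak counters
#     output = pipe(prompt_embeds=..., num_inference_steps=2, output_type="np")
#     mem_bytes = torch.cuda.max_memory_allocated()     # peak bytes allocated since the reset
#     assert mem_bytes < 13 * 10**9                     # the tests budget 13/10 GB for stage I and 4 GB for stage II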
| 58 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : str = RobertaEmbeddings(_lowercase )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Optional[Any] = config.num_labels
snake_case_ : Dict = config.num_hidden_layers
snake_case_ : str = DeeRobertaModel(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : List[str] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> Tuple:
        '''Forward pass that also evaluates the intermediate highway (early-exit) classifiers, collecting their losses and, at inference time, their logits and entropies.'''
snake_case_ : Any = self.num_layers
try:
snake_case_ : int = self.roberta(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
snake_case_ : str = outputs[1]
snake_case_ : Union[str, Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : List[Any] = e.message
snake_case_ : Union[str, Any] = e.exit_layer
snake_case_ : Dict = outputs[0]
if not self.training:
snake_case_ : Dict = entropy(_lowercase )
snake_case_ : Optional[int] = []
snake_case_ : Union[str, Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Dict = MSELoss()
snake_case_ : Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Union[str, Any] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : int = []
for highway_exit in outputs[-1]:
snake_case_ : Tuple = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Optional[int] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : List[str] = (loss,) + outputs
if not self.training:
snake_case_ : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : Tuple = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
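# A hedged usage sketch for the classifier above. The public names DeeRobertaModel and
# DeeRobertaForSequenceClassification come from the original DeeBERT research code and are
# assumptions here, since identifiers in this dump were rewritten:
#
#     config = RobertaConfig.from_pretrained("roberta-base", num_labels=2)
#     model = DeeRobertaForSequenceClassification(config)
#     loss, logits = model(input_ids, attention_mask=attention_mask, labels=labels)[:2]
#     # At inference time each highway (early-exit) head also reports its entropy; a layer
#     # whose entropy is low enough can return its prediction without running the remaining layers.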
| 58 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase ) -> None:
'''simple docstring'''
snake_case_ : List[Any] = num_of_nodes
snake_case_ : list[list[int]] = []
snake_case_ : dict[int, int] = {}
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> None:
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
snake_case_ : Dict = self.find_component(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> None:
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
snake_case_ : Dict = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowercase )
elif component_size[u_node] >= component_size[v_node]:
snake_case_ : str = self.find_component(_lowercase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowercase )
def UpperCAmelCase__ ( self ) -> None:
        '''Borůvka's algorithm: repeatedly attach every component to its minimum-weight outgoing edge until a single component remains, printing the added edges and the total MST weight.'''
snake_case_ : str = []
snake_case_ : Any = 0
snake_case_ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
snake_case_ : Tuple = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
snake_case_ , snake_case_ , snake_case_ : Tuple = edge
snake_case_ : List[str] = self.m_component[u]
snake_case_ : str = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
snake_case_ : Optional[int] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowercase , _lowercase ):
snake_case_ , snake_case_ , snake_case_ : Tuple = edge
snake_case_ : Any = self.m_component[u]
snake_case_ : Union[str, Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowercase , _lowercase , _lowercase )
print(f'Added edge [{u} - {v}]\nAdded weight: {w}\n' )
num_of_components -= 1
snake_case_ : Optional[Any] = [-1] * self.m_num_of_nodes
print(f'The total weight of the minimal spanning tree is: {mst_weight}' )
def __lowerCAmelCase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
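# A hedged usage sketch for the graph class above. The names `Graph`, `add_edge` and
# `boruvka` follow the usual reference implementation of Borůvka's algorithm and are
# assumptions here, since identifiers in this dump were rewritten:
#
#     g = Graph(4)
#     for u, v, w in [(0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)]:
#         g.add_edge(u, v, w)
#     g.boruvka()   # merges each component along its minimum-weight outgoing edge;
#                   # for this graph it prints a total minimum-spanning-tree weight of 19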
| 58 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : list[int] ):
    '''Check that the candidate vertex is adjacent to the last vertex on the path and has not been visited yet.'''
    # 1. Validate that current and next vertices are adjacent
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : list[int] , __UpperCamelCase : int ):
    '''Recursive backtracking step: try every vertex as the next stop on the partially built cycle.'''
if curr_ind == len(__UpperCamelCase ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(__UpperCamelCase ) ):
if valid_connection(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
# Insert current vertex into path as next transition
snake_case_ : List[str] = next_ver
# Validate created path
if util_hamilton_cycle(__UpperCamelCase , __UpperCamelCase , curr_ind + 1 ):
return True
# Backtrack
snake_case_ : Tuple = -1
return False
def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : int = 0 ):
    '''Initialise the path with the start vertex and run the backtracking search; returns the cycle or an empty list.'''
snake_case_ : Tuple = [-1] * (len(__UpperCamelCase ) + 1)
# initialize start and end of path with starting index
snake_case_ : Optional[int] = start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(__UpperCamelCase , __UpperCamelCase , 1 ) else []
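# A hedged example of the expected behaviour. The public name `hamilton_cycle` for the
# wrapper above is an assumption; the adjacency matrix is the usual 5-vertex reference case:
#
#     graph = [
#         [0, 1, 0, 1, 0],
#         [1, 0, 1, 1, 1],
#         [0, 1, 0, 0, 1],
#         [1, 1, 0, 0, 1],
#         [0, 1, 1, 1, 0],
#     ]
#     hamilton_cycle(graph)      # -> [0, 1, 2, 4, 3, 0]
#     hamilton_cycle(graph, 3)   # -> a cycle that starts and ends at vertex 3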
| 58 | 1 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : str = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''owlvit_text_model'''
def __init__( self , _lowercase=4_9_4_0_8 , _lowercase=5_1_2 , _lowercase=2_0_4_8 , _lowercase=1_2 , _lowercase=8 , _lowercase=1_6 , _lowercase="quick_gelu" , _lowercase=1E-5 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1.0 , _lowercase=0 , _lowercase=4_9_4_0_6 , _lowercase=4_9_4_0_7 , **_lowercase , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
snake_case_ : int = vocab_size
snake_case_ : Tuple = hidden_size
snake_case_ : Any = intermediate_size
snake_case_ : List[Any] = num_hidden_layers
snake_case_ : str = num_attention_heads
snake_case_ : Dict = max_position_embeddings
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Any = layer_norm_eps
snake_case_ : Union[str, Any] = attention_dropout
snake_case_ : Any = initializer_range
snake_case_ : int = initializer_factor
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , **_lowercase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowercase )
snake_case_ , snake_case_ : Union[str, Any] = cls.get_config_dict(_lowercase , **_lowercase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
snake_case_ : str = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowercase , **_lowercase )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''owlvit_vision_model'''
def __init__( self , _lowercase=7_6_8 , _lowercase=3_0_7_2 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3 , _lowercase=7_6_8 , _lowercase=3_2 , _lowercase="quick_gelu" , _lowercase=1E-5 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1.0 , **_lowercase , ) -> Dict:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Optional[int] = hidden_size
snake_case_ : Optional[int] = intermediate_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : str = num_attention_heads
snake_case_ : Union[str, Any] = num_channels
snake_case_ : int = image_size
snake_case_ : Dict = patch_size
snake_case_ : Tuple = hidden_act
snake_case_ : Tuple = layer_norm_eps
snake_case_ : Optional[int] = attention_dropout
snake_case_ : str = initializer_range
snake_case_ : Union[str, Any] = initializer_factor
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , **_lowercase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowercase )
snake_case_ , snake_case_ : int = cls.get_config_dict(_lowercase , **_lowercase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
snake_case_ : Any = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowercase , **_lowercase )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''owlvit'''
_lowerCamelCase = True
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=5_1_2 , _lowercase=2.6592 , _lowercase=True , **_lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_lowercase )
if text_config is None:
snake_case_ : Optional[Any] = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
snake_case_ : Tuple = {}
logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
snake_case_ : Optional[int] = OwlViTTextConfig(**_lowercase )
snake_case_ : Optional[int] = OwlViTVisionConfig(**_lowercase )
snake_case_ : Union[str, Any] = projection_dim
snake_case_ : str = logit_scale_init_value
snake_case_ : int = return_dict
snake_case_ : List[str] = 1.0
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , **_lowercase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowercase )
snake_case_ , snake_case_ : List[str] = cls.get_config_dict(_lowercase , **_lowercase )
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowercase , **_lowercase )
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , _lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = {}
snake_case_ : List[str] = text_config
snake_case_ : Any = vision_config
return cls.from_dict(_lowercase , **_lowercase )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Dict = self.text_config.to_dict()
snake_case_ : Optional[int] = self.vision_config.to_dict()
snake_case_ : int = self.__class__.model_type
return output
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-4
def UpperCAmelCase__ ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Any = super().generate_dummy_inputs(
processor.tokenizer , batch_size=_lowercase , seq_length=_lowercase , framework=_lowercase )
snake_case_ : Any = super().generate_dummy_inputs(
processor.image_processor , batch_size=_lowercase , framework=_lowercase )
return {**text_input_dict, **image_input_dict}
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return 1_4
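# A hedged usage sketch for the configuration classes above. The public names
# OwlViTTextConfig / OwlViTVisionConfig / OwlViTConfig and the `from_text_vision_configs`
# helper follow the upstream `transformers` API and should be treated as assumptions here:
#
#     from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig
#
#     text_cfg = OwlViTTextConfig(vocab_size=49408, hidden_size=512)
#     vision_cfg = OwlViTVisionConfig(hidden_size=768, image_size=768, patch_size=32)
#     config = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
#     config.to_dict()   # nests text_config / vision_config exactly as the to_dict() method above does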
| 58 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''image_processor''', '''tokenizer''']
_lowerCamelCase = '''BlipImageProcessor'''
_lowerCamelCase = '''AutoTokenizer'''
def __init__( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
super().__init__(_lowercase , _lowercase )
# add QFormer tokenizer
snake_case_ : List[str] = qformer_tokenizer
def __call__( self , _lowercase = None , _lowercase = None , _lowercase = True , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = 0 , _lowercase = None , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = True , _lowercase = None , **_lowercase , ) -> BatchFeature:
        '''Tokenize `text` with both the language-model tokenizer and the Q-Former tokenizer and preprocess `images`, returning everything in a single BatchFeature.'''
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
snake_case_ : Optional[Any] = BatchFeature()
if text is not None:
snake_case_ : List[str] = self.tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_token_type_ids=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
encoding.update(_lowercase )
snake_case_ : Union[str, Any] = self.qformer_tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_token_type_ids=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
snake_case_ : List[str] = qformer_text_encoding.pop("""input_ids""" )
snake_case_ : Union[str, Any] = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
snake_case_ : Tuple = self.image_processor(_lowercase , return_tensors=_lowercase )
encoding.update(_lowercase )
return encoding
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = self.tokenizer.model_input_names
snake_case_ : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCAmelCase__ ( self , _lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
if os.path.isfile(_lowercase ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(_lowercase , exist_ok=_lowercase )
snake_case_ : int = os.path.join(_lowercase , """qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(_lowercase )
return super().save_pretrained(_lowercase , **_lowercase )
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , **_lowercase ) -> int:
'''simple docstring'''
snake_case_ : List[str] = AutoTokenizer.from_pretrained(_lowercase , subfolder="""qformer_tokenizer""" )
snake_case_ : Union[str, Any] = cls._get_arguments_from_pretrained(_lowercase , **_lowercase )
args.append(_lowercase )
return cls(*_lowercase )
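# A hedged usage sketch for the processor above (the InstructBLIP processor, which wraps an
# image processor, the language-model tokenizer and a separate Q-Former tokenizer). The
# checkpoint name is illustrative only:
#
#     from transformers import InstructBlipProcessor
#
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#     inputs = processor(images=image, text="What is shown in this picture?", return_tensors="pt")
#     # `inputs` holds pixel_values plus input_ids/attention_mask from the LM tokenizer; in the
#     # upstream implementation the Q-Former ids popped above are stored as
#     # qformer_input_ids / qformer_attention_mask.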
| 58 | 1 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : List[str] = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
snake_case_ : Optional[Any] = Vector()
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : List[Any] = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(_lowercase ) , """(0,0,0,0,0,1)""" )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : Any = Vector([1, 2, 3, 4] )
self.assertEqual(len(_lowercase ) , 4 )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : Union[str, Any] = Vector([1, 2] )
snake_case_ : Optional[Any] = Vector([1, 2, 3, 4, 5] )
snake_case_ : Any = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
snake_case_ : List[str] = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : List[Any] = Vector([1, 2, 3] )
snake_case_ : str = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : Tuple = Vector([1, 2, 3] )
snake_case_ : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : List[str] = Vector([1, 2, 3] )
snake_case_ : int = Vector([2, -1, 4] ) # for test of dot product
snake_case_ : Optional[int] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
self.assertEqual(str(zero_vector(1_0 ) ).count("""0""" ) , 1_0 )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : Dict = Vector([1, 2, 3] )
snake_case_ : List[str] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , _lowercase , _lowercase ) ) , """(3,4,7)""" )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : int = Vector([1, 0, 0, 0, 0, 0] )
snake_case_ : Optional[Any] = x.copy()
self.assertEqual(str(_lowercase ) , str(_lowercase ) )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : Optional[Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(_lowercase ) , """(0,1,0)""" )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(_lowercase ) )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
snake_case_ : Optional[Any] = [[-3, -1_4, -1_0], [-5, -1_0, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(_lowercase , _lowercase ) )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
snake_case_ : str = [[-3, 1_4, -1_0], [5, -1_0, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(_lowercase , _lowercase ) )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : int = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
snake_case_ : Dict = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(_lowercase ) )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
snake_case_ : int = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
snake_case_ : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
snake_case_ : Any = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 58 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase : List[Any] = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowerCAmelCase : Optional[int] = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple = ['''DPTFeatureExtractor''']
__lowerCAmelCase : str = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
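# Both this DPT __init__ and the Vivit __init__ above follow the same lazy-import pattern:
# the heavy submodules are only imported when one of their attributes is first accessed.
# An illustrative (assumed) usage:
#
#     from transformers.models.dpt import DPTImageProcessor   # triggers the lazy load of image_processing_dpt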
| 58 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[str] = WavaVecaForSequenceClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : int = downstream_dict["""projector.weight"""]
snake_case_ : Optional[int] = downstream_dict["""projector.bias"""]
snake_case_ : List[Any] = downstream_dict["""model.post_net.linear.weight"""]
snake_case_ : Union[str, Any] = downstream_dict["""model.post_net.linear.bias"""]
return model
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = WavaVecaForAudioFrameClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : Any = downstream_dict["""model.linear.weight"""]
snake_case_ : int = downstream_dict["""model.linear.bias"""]
return model
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = WavaVecaForXVector.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : Any = downstream_dict["""connector.weight"""]
snake_case_ : str = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
snake_case_ : Dict = downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
snake_case_ : int = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
snake_case_ : str = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
snake_case_ : int = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
snake_case_ : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
snake_case_ : int = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
snake_case_ : List[str] = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Any = torch.load(__UpperCamelCase , map_location="""cpu""" )
snake_case_ : Any = checkpoint["""Downstream"""]
snake_case_ : Optional[Any] = WavaVecaConfig.from_pretrained(__UpperCamelCase )
snake_case_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
__UpperCamelCase , return_attention_mask=__UpperCamelCase , do_normalize=__UpperCamelCase )
snake_case_ : Optional[Any] = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
snake_case_ : Tuple = convert_classification(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif arch.endswith("""ForAudioFrameClassification""" ):
snake_case_ : Union[str, Any] = convert_diarization(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif arch.endswith("""ForXVector""" ):
snake_case_ : List[str] = convert_xvector(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
snake_case_ : List[Any] = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(__UpperCamelCase )
hf_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
__lowerCAmelCase : Dict = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
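# A hedged example invocation of the conversion script above (the script file name and all
# paths are placeholders; the flag names come from the argparse definition right above):
#
#     python convert_s3prl_checkpoint.py \
#         --base_model_name facebook/wav2vec2-base-960h \
#         --config_path ./config.json \
#         --checkpoint_path ./s3prl_downstream_checkpoint.ckpt \
#         --model_dump_path ./converted_model
#
# Depending on `config.architectures[0]` the downstream head is mapped onto
# WavaVecaForSequenceClassification, WavaVecaForAudioFrameClassification or WavaVecaForXVector.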
| 58 | 1 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str ):
    '''K-means clustering in TensorFlow graph mode: `vectors` is a sequence of equal-length feature vectors and `noofclusters` an int; returns (centroids, assignments) after a fixed number of E-M iterations.'''
snake_case_ : Optional[Any] = int(__UpperCamelCase )
assert noofclusters < len(__UpperCamelCase )
# Find out the dimensionality
snake_case_ : List[str] = len(vectors[0] )
# Will help select random centroids from among the available vectors
snake_case_ : Union[str, Any] = list(range(len(__UpperCamelCase ) ) )
shuffle(__UpperCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
snake_case_ : Optional[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
snake_case_ : Tuple = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
snake_case_ : int = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(__UpperCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
snake_case_ : Any = tf.placeholder("""float64""" , [dim] )
snake_case_ : int = []
for centroid in centroids:
cent_assigns.append(tf.assign(__UpperCamelCase , __UpperCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
snake_case_ : Tuple = [tf.Variable(0 ) for i in range(len(__UpperCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
snake_case_ : Dict = tf.placeholder("""int32""" )
snake_case_ : List[Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(__UpperCamelCase , __UpperCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
snake_case_ : Union[str, Any] = tf.placeholder("""float""" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
snake_case_ : str = tf.reduce_mean(__UpperCamelCase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
snake_case_ : Union[str, Any] = tf.placeholder("""float""" , [dim] )
snake_case_ : Any = tf.placeholder("""float""" , [dim] )
snake_case_ : Optional[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(__UpperCamelCase , __UpperCamelCase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
snake_case_ : Union[str, Any] = tf.placeholder("""float""" , [noofclusters] )
snake_case_ : List[Any] = tf.argmin(__UpperCamelCase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
snake_case_ : str = tf.initialize_all_variables()
# Initialize all variables
sess.run(__UpperCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
snake_case_ : Union[str, Any] = 1_0_0
for _ in range(__UpperCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(__UpperCamelCase ) ):
snake_case_ : Optional[int] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
snake_case_ : Dict = [
sess.run(__UpperCamelCase , feed_dict={va: vect, va: sess.run(__UpperCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
snake_case_ : List[str] = sess.run(
__UpperCamelCase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(__UpperCamelCase ):
# Collect all the vectors assigned to this cluster
snake_case_ : Any = [
vectors[i]
for i in range(len(__UpperCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
snake_case_ : Dict = sess.run(
__UpperCamelCase , feed_dict={mean_input: array(__UpperCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
snake_case_ : List[str] = sess.run(__UpperCamelCase )
snake_case_ : List[str] = sess.run(__UpperCamelCase )
return centroids, assignments
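# The routine above runs one hard-assignment E-M pass per iteration in TensorFlow 1.x graph
# mode (tf.placeholder / tf.Session are TF1 APIs, and tf.sub is the pre-1.0 name of
# tf.subtract). A minimal NumPy sketch of the same loop, added here purely for illustration:


def _kmeans_numpy_sketch(vectors, noofclusters, iterations=100):
    """Plain-NumPy k-means with the same E-step / M-step structure as the TF1 code above."""
    import numpy as np

    data = np.asarray(vectors, dtype=float)
    rng = np.random.default_rng(0)
    centroids = data[rng.choice(len(data), size=noofclusters, replace=False)]
    assignments = np.zeros(len(data), dtype=int)
    for _ in range(iterations):
        # E-step: assign every vector to its nearest centroid (Euclidean distance).
        distances = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = distances.argmin(axis=1)
        # M-step: move each centroid to the mean of the vectors currently assigned to it.
        for k in range(noofclusters):
            members = data[assignments == k]
            if len(members):
                centroids[k] = members.mean(axis=0)
    return centroids, assignments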
| 58 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : int = {'''vocab_file''': '''vocab.txt'''}
__lowerCAmelCase : Union[str, Any] = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
__lowerCAmelCase : Optional[Any] = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
__lowerCAmelCase : Any = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ConvBertTokenizer
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=True , _lowercase="[UNK]" , _lowercase="[SEP]" , _lowercase="[PAD]" , _lowercase="[CLS]" , _lowercase="[MASK]" , _lowercase=True , _lowercase=None , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
snake_case_ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _lowercase ) != tokenize_chinese_chars
):
snake_case_ : Optional[int] = getattr(_lowercase , normalizer_state.pop("""type""" ) )
snake_case_ : Dict = do_lower_case
snake_case_ : str = strip_accents
snake_case_ : Optional[Any] = tokenize_chinese_chars
snake_case_ : int = normalizer_class(**_lowercase )
snake_case_ : Optional[int] = do_lower_case
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> int:
'''simple docstring'''
snake_case_ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : int = [self.sep_token_id]
snake_case_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : Optional[int] = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
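# A hedged usage sketch (the class above is the "fast" ConvBERT tokenizer; the public name
# ConvBertTokenizerFast is an assumption here, and the checkpoint name is taken from the
# pretrained-vocab map defined earlier in this file):
#
#     tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     encoded = tokenizer("Hello world", return_tensors="pt")
#     # [CLS] ... [SEP] special tokens and the 0/1 token_type_ids follow the BERT-style scheme
#     # implemented by the two helper methods above.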
| 58 | 1 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Any = os.path.join(args.tf_model_dir , """parameters.json""" )
snake_case_ : List[str] = json.loads(open(__UpperCamelCase ).read() )
if not params:
raise ValueError(
F'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' )
if not args.output.endswith(""".pt""" ):
snake_case_ : Optional[Any] = args.output + """.pt"""
snake_case_ : List[Any] = OrderedDict()
with tf.device("""/CPU:0""" ):
snake_case_ : Any = tf.train.load_checkpoint(args.tf_model_dir )
snake_case_ : Tuple = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
snake_case_ : Union[str, Any] = reader.get_tensor(__UpperCamelCase ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
snake_case_ : Any = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
snake_case_ : Union[str, Any] = 8
snake_case_ : Optional[Any] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
snake_case_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ : Dict = torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/moe""" ):
snake_case_ : Optional[int] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
snake_case_ : Dict = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
snake_case_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ : Union[str, Any] = torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/softmlp/kernel""" ):
snake_case_ : Tuple = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
snake_case_ : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ : List[str] = torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
snake_case_ : List[Any] = key_name[-9:-7]
for i in range(1_6 ):
snake_case_ : int = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
snake_case_ : Union[str, Any] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
snake_case_ : Optional[Any] = torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/mlp""" ):
snake_case_ : int = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
snake_case_ : Optional[int] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
snake_case_ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ : Optional[int] = torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/p1/bias""" ):
snake_case_ : str = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
snake_case_ : Optional[int] = vnp.copy() # same because it is one dimensional
snake_case_ : Any = torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/p2/kernel""" ):
snake_case_ : Any = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
snake_case_ : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ : str = torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/p2/bias""" ):
snake_case_ : List[Any] = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
snake_case_ : List[str] = vnp.copy() # same because it is one dimensional
snake_case_ : Tuple = torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/ln""" ):
snake_case_ : Optional[Any] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
snake_case_ : Optional[int] = """model.blocks.%d.feed_forward.norm.bias""" % player
snake_case_ : Tuple = vnp.copy() # same because it is one dimensional
snake_case_ : Optional[Any] = torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/g""" ):
snake_case_ : List[Any] = """model.blocks.%d.feed_forward.norm.weight""" % player
snake_case_ : Dict = vnp.copy() # same because it is one dimensional
snake_case_ : Optional[int] = torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/att""" ):
snake_case_ : List[Any] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
snake_case_ : Tuple = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
snake_case_ : str = state[:, 0, :, :]
snake_case_ : str = state[:, 1, :, :]
snake_case_ : Optional[Any] = state[:, 2, :, :]
snake_case_ : Any = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ : str = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ : List[str] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ : str = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
snake_case_ : Optional[Any] = torch.tensor(__UpperCamelCase )
snake_case_ : Dict = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
snake_case_ : int = torch.tensor(__UpperCamelCase )
snake_case_ : Tuple = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
snake_case_ : Tuple = torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/o/kernel""" ):
snake_case_ : List[str] = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
snake_case_ : Dict = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ : Union[str, Any] = torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/an""" ):
snake_case_ : Optional[int] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
snake_case_ : Optional[int] = """model.blocks.%d.self_attn.norm.bias""" % player
snake_case_ : Optional[Any] = vnp.copy() # same because it is one dimensional
snake_case_ : Optional[Any] = torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/g""" ):
snake_case_ : str = """model.blocks.%d.self_attn.norm.weight""" % player
snake_case_ : str = vnp.copy() # same because it is one dimensional
snake_case_ : Dict = torch.tensor(__UpperCamelCase )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
snake_case_ : Optional[int] = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
snake_case_ : Union[str, Any] = """model.%s.weight""" % nlayer
snake_case_ : Optional[Any] = vnp.copy() # same in embedded
snake_case_ : str = torch.tensor(__UpperCamelCase )
if key_name.startswith("""model/wte""" ):
snake_case_ : Optional[int] = """lm_head.weight"""
snake_case_ : Tuple = vnp.copy() # same in embedded
snake_case_ : Union[str, Any] = torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/wob""" ):
snake_case_ : List[str] = """final_logits_bias"""
snake_case_ : Optional[int] = vnp.copy() # same in embedded
snake_case_ : List[str] = state.reshape((1, -1) )
snake_case_ : str = torch.tensor(__UpperCamelCase )
elif key_name == "model/dense/kernel":
snake_case_ : Tuple = """model.last_project.weight"""
snake_case_ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ : Tuple = torch.tensor(__UpperCamelCase )
elif key_name == "model/dense_1/bias":
snake_case_ : Tuple = """model.last_project.bias"""
snake_case_ : Tuple = vnp.copy() # same because it is one dimensional
snake_case_ : int = torch.tensor(__UpperCamelCase )
torch.save(__UpperCamelCase , args.output )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
__lowerCAmelCase : Dict = parser.parse_args()
convert_tf_gptsan_to_pt(args)
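# A hedged example invocation of the converter above (the script file name and the paths are
# placeholders; the flag names come from the argparse definition right above):
#
#     python convert_gptsan_tf_checkpoint_to_pytorch.py \
#         --tf_model_dir ./gptsan_tf_checkpoint \
#         --output ./gptsan_pytorch_model.pt
#
# The TensorFlow checkpoint directory must contain a `parameters.json` file next to the
# checkpoint itself; each TF variable is remapped, transposed where needed, and written into
# a single PyTorch state dict at the --output path.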
| 58 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@register_to_config
def __init__( self , _lowercase = 1_2_8 , _lowercase = 2_5_6 , _lowercase = 2000.0 , _lowercase = 7_6_8 , _lowercase = 1_2 , _lowercase = 1_2 , _lowercase = 6_4 , _lowercase = 2_0_4_8 , _lowercase = 0.1 , ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[Any] = nn.Sequential(
nn.Linear(_lowercase , d_model * 4 , bias=_lowercase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_lowercase ) , nn.SiLU() , )
snake_case_ : Any = nn.Embedding(_lowercase , _lowercase )
snake_case_ : Union[str, Any] = False
snake_case_ : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Union[str, Any] = nn.Dropout(p=_lowercase )
snake_case_ : Tuple = nn.ModuleList()
for lyr_num in range(_lowercase ):
# FiLM conditional T5 decoder
snake_case_ : Union[str, Any] = DecoderLayer(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase )
self.decoders.append(_lowercase )
snake_case_ : List[Any] = TaLayerNorm(_lowercase )
snake_case_ : Optional[Any] = nn.Dropout(p=_lowercase )
snake_case_ : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
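    # Shape sketch for the helper above, assuming query_input is (batch, query_len) and
    # key_input is (batch, key_len): the product of the unsqueezed masks is
    # (batch, query_len, key_len), and unsqueeze(-3) adds a broadcastable head dimension,
    # giving (batch, 1, query_len, key_len).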
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
snake_case_ : Optional[int] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
snake_case_ : int = self.conditioning_emb(_lowercase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
snake_case_ : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
snake_case_ : Dict = torch.broadcast_to(
torch.arange(_lowercase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
snake_case_ : Tuple = self.position_encoding(_lowercase )
snake_case_ : Optional[Any] = self.continuous_inputs_projection(_lowercase )
inputs += position_encodings
snake_case_ : List[Any] = self.dropout(_lowercase )
# decoder: No padding present.
snake_case_ : Tuple = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
snake_case_ : int = [(x, self.encoder_decoder_mask(_lowercase , _lowercase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
snake_case_ : Optional[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
snake_case_ : str = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
snake_case_ : int = lyr(
_lowercase , conditioning_emb=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )[0]
snake_case_ : int = self.decoder_norm(_lowercase )
snake_case_ : Union[str, Any] = self.post_dropout(_lowercase )
snake_case_ : int = self.spec_out(_lowercase )
return spec_out
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=1E-6 ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
snake_case_ : Any = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase ) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = self.layer[0](
_lowercase , conditioning_emb=_lowercase , attention_mask=_lowercase , )
if encoder_hidden_states is not None:
snake_case_ : Tuple = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
snake_case_ : str = self.layer[1](
_lowercase , key_value_states=_lowercase , attention_mask=_lowercase , )
# Apply Film Conditional Feed Forward layer
snake_case_ : Any = self.layer[-1](_lowercase , _lowercase )
return (hidden_states,)
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : Any = TaLayerNorm(_lowercase )
snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase )
snake_case_ : Union[str, Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase )
snake_case_ : List[Any] = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = self.layer_norm(_lowercase )
if conditioning_emb is not None:
snake_case_ : str = self.FiLMLayer(_lowercase , _lowercase )
# Self-attention block
snake_case_ : List[Any] = self.attention(_lowercase )
snake_case_ : List[str] = hidden_states + self.dropout(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : List[Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase )
snake_case_ : Union[str, Any] = TaLayerNorm(_lowercase , eps=_lowercase )
snake_case_ : Optional[Any] = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.layer_norm(_lowercase )
snake_case_ : Optional[Any] = self.attention(
_lowercase , encoder_hidden_states=_lowercase , attention_mask=attention_mask.squeeze(1 ) , )
snake_case_ : Any = hidden_states + self.dropout(_lowercase )
return layer_output
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = TaDenseGatedActDense(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase )
snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase )
snake_case_ : Optional[int] = TaLayerNorm(_lowercase , eps=_lowercase )
snake_case_ : Tuple = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = self.layer_norm(_lowercase )
if conditioning_emb is not None:
snake_case_ : Optional[int] = self.film(_lowercase , _lowercase )
snake_case_ : int = self.DenseReluDense(_lowercase )
snake_case_ : Optional[Any] = hidden_states + self.dropout(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[int] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Optional[int] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Any = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : int = nn.Dropout(_lowercase )
snake_case_ : Optional[int] = NewGELUActivation()
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : str = self.act(self.wi_a(_lowercase ) )
snake_case_ : Dict = self.wi_a(_lowercase )
snake_case_ : Any = hidden_gelu * hidden_linear
snake_case_ : List[Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.wo(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1E-6 ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : Union[str, Any] = nn.Parameter(torch.ones(_lowercase ) )
snake_case_ : int = eps
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=_lowercase )
snake_case_ : Any = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
snake_case_ : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def UpperCAmelCase__ ( self , _lowercase ) -> torch.Tensor:
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(_lowercase , 3.0 )) ))
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
super().__init__()
snake_case_ : List[Any] = nn.Linear(_lowercase , out_features * 2 , bias=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.scale_bias(_lowercase )
snake_case_ , snake_case_ : Any = torch.chunk(_lowercase , 2 , -1 )
snake_case_ : Optional[Any] = x * (1 + scale) + shift
return x
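# --- Illustrative sketch, not part of the original file ---
# The FiLM layer above projects a conditioning embedding to a per-channel
# (scale, shift) pair and modulates its input as `x * (1 + scale) + shift`.
# A minimal standalone re-implementation is sketched below to make the
# mechanism explicit; the class and argument names are hypothetical.
import torch
from torch import nn


class SimpleFiLM(nn.Module):
    def __init__(self, cond_features: int, out_features: int) -> None:
        super().__init__()
        # A single projection produces both scale and shift, split on the last dim.
        self.scale_bias = nn.Linear(cond_features, out_features * 2, bias=False)

    def forward(self, x: torch.Tensor, conditioning: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.scale_bias(conditioning), 2, dim=-1)
        return x * (1 + scale) + shift


film = SimpleFiLM(cond_features=16, out_features=8)
x = torch.randn(2, 4, 8)       # (batch, sequence, channels)
cond = torch.randn(2, 1, 16)   # broadcast over the sequence dimension
assert film(x, cond).shape == (2, 4, 8)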
| 58 | 1 |
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
__lowerCAmelCase : Any = False
__lowerCAmelCase : Tuple = False
def __lowerCAmelCase ( __UpperCamelCase : Namespace ):
'''simple docstring'''
return TrainCommand(__UpperCamelCase )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase__ ( _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : str = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" )
train_parser.add_argument(
"""--train_data""" , type=_lowercase , required=_lowercase , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , )
train_parser.add_argument(
"""--column_label""" , type=_lowercase , default=0 , help="""Column of the dataset csv file with example labels.""" )
train_parser.add_argument(
"""--column_text""" , type=_lowercase , default=1 , help="""Column of the dataset csv file with example texts.""" )
train_parser.add_argument(
"""--column_id""" , type=_lowercase , default=2 , help="""Column of the dataset csv file with example ids.""" )
train_parser.add_argument(
"""--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" )
train_parser.add_argument("""--validation_data""" , type=_lowercase , default="""""" , help="""path to validation dataset.""" )
train_parser.add_argument(
"""--validation_split""" , type=_lowercase , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , )
train_parser.add_argument("""--output""" , type=_lowercase , default="""./""" , help="""path to saved the trained model.""" )
train_parser.add_argument(
"""--task""" , type=_lowercase , default="""text_classification""" , help="""Task to train the model on.""" )
train_parser.add_argument(
"""--model""" , type=_lowercase , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""" )
train_parser.add_argument("""--train_batch_size""" , type=_lowercase , default=3_2 , help="""Batch size for training.""" )
train_parser.add_argument("""--valid_batch_size""" , type=_lowercase , default=6_4 , help="""Batch size for validation.""" )
train_parser.add_argument("""--learning_rate""" , type=_lowercase , default=3E-5 , help="""Learning rate.""" )
train_parser.add_argument("""--adam_epsilon""" , type=_lowercase , default=1E-08 , help="""Epsilon for Adam optimizer.""" )
train_parser.set_defaults(func=_lowercase )
def __init__( self , _lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = logging.get_logger("""transformers-cli/training""" )
snake_case_ : Dict = """tf""" if is_tf_available() else """torch"""
os.makedirs(args.output , exist_ok=_lowercase )
snake_case_ : List[Any] = args.output
snake_case_ : Optional[int] = args.column_label
snake_case_ : List[str] = args.column_text
snake_case_ : int = args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
snake_case_ : Any = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
snake_case_ : Tuple = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
snake_case_ : Tuple = None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
snake_case_ : List[Any] = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
snake_case_ : Dict = args.validation_split
snake_case_ : Tuple = args.train_batch_size
snake_case_ : List[Any] = args.valid_batch_size
snake_case_ : List[Any] = args.learning_rate
snake_case_ : str = args.adam_epsilon
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
raise NotImplementedError
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
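# --- Illustrative sketch, not part of the original file ---
# TrainCommand follows the usual transformers-cli sub-command pattern: each
# command registers a sub-parser, stores a factory through
# `set_defaults(func=...)`, and the entry point builds the command from the
# parsed arguments and calls `run()`. A tiny standalone version of that
# dispatch loop, with hypothetical names:
from argparse import ArgumentParser, Namespace


class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers) -> None:
        parser = subparsers.add_parser("echo", help="Print a message.")
        parser.add_argument("--message", type=str, default="hello")
        parser.set_defaults(func=lambda args: EchoCommand(args))

    def __init__(self, args: Namespace) -> None:
        self.message = args.message

    def run(self) -> None:
        print(self.message)


cli = ArgumentParser("demo-cli")
commands = cli.add_subparsers(dest="command")
EchoCommand.register_subcommand(commands)
parsed = cli.parse_args(["echo", "--message", "hi"])
parsed.func(parsed).run()  # prints "hi"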
| 58 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''roformer'''
def __init__( self , _lowercase=5_0_0_0_0 , _lowercase=None , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1_5_3_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase=0 , _lowercase=False , _lowercase=True , **_lowercase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , **_lowercase )
snake_case_ : str = vocab_size
snake_case_ : Any = hidden_size if embedding_size is None else embedding_size
snake_case_ : List[str] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : Tuple = initializer_range
snake_case_ : str = layer_norm_eps
snake_case_ : List[str] = rotary_value
snake_case_ : str = use_cache
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : Any = {0: """batch""", 1: """sequence"""}
snake_case_ : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
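# --- Illustrative sketch, not part of the original file ---
# RoFormer's distinguishing feature is the rotary position embedding: query
# and key vectors are split into pairs of dimensions and rotated by an angle
# that grows with the token position, so attention scores depend only on
# relative offsets. The generic sketch below illustrates the idea; it is not
# the exact RoFormer implementation and the helper name is made up.
import torch


def rotary_embed(x: torch.Tensor, base: float = 10000.0) -> torch.Tensor:
    """Apply a rotary position embedding to `x` of shape (seq_len, dim), dim even."""
    seq_len, dim = x.shape
    half = dim // 2
    # One frequency per pair of dimensions, one angle per (position, pair).
    inv_freq = 1.0 / (base ** (torch.arange(half, dtype=torch.float32) / half))
    angles = torch.arange(seq_len, dtype=torch.float32)[:, None] * inv_freq[None, :]
    cos, sin = angles.cos(), angles.sin()
    x1, x2 = x[:, :half], x[:, half:]
    # Pairwise 2-D rotation: (x1, x2) -> (x1*cos - x2*sin, x1*sin + x2*cos).
    return torch.cat([x1 * cos - x2 * sin, x1 * sin + x2 * cos], dim=-1)


queries = torch.randn(8, 64)
assert rotary_embed(queries).shape == (8, 64)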
| 58 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : int = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''swinv2'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=2_2_4 , _lowercase=4 , _lowercase=3 , _lowercase=9_6 , _lowercase=[2, 2, 6, 2] , _lowercase=[3, 6, 1_2, 2_4] , _lowercase=7 , _lowercase=4.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=False , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=3_2 , **_lowercase , ) -> Any:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Optional[int] = image_size
snake_case_ : List[str] = patch_size
snake_case_ : str = num_channels
snake_case_ : Union[str, Any] = embed_dim
snake_case_ : List[str] = depths
snake_case_ : Any = len(_lowercase )
snake_case_ : Union[str, Any] = num_heads
snake_case_ : Dict = window_size
snake_case_ : Optional[Any] = mlp_ratio
snake_case_ : Tuple = qkv_bias
snake_case_ : str = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : List[Any] = drop_path_rate
snake_case_ : Optional[int] = hidden_act
snake_case_ : str = use_absolute_embeddings
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Any = initializer_range
snake_case_ : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Optional[Any] = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
snake_case_ : List[str] = (0, 0, 0, 0)
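# --- Illustrative note, not part of the original file ---
# With the defaults above (embed_dim=96, depths=[2, 2, 6, 2]) the channel
# dimension doubles after each of the three downsampling stages, so the
# hidden_size exposed for VisionEncoderDecoderModel works out to 768:
embed_dim, depths = 96, [2, 2, 6, 2]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 768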
| 58 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Dict = checkpoints.load_tax_checkpoint(__UpperCamelCase )
snake_case_ : Tuple = flatten_dict(__UpperCamelCase )
return flax_params
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = {}
snake_case_ : List[Any] = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
snake_case_ : Optional[Any] = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
snake_case_ : List[Any] = """.""".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
snake_case_ : List[str] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
snake_case_ : Optional[int] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
snake_case_ : Optional[Any] = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , __UpperCamelCase )
snake_case_ : Union[str, Any] = new_key.replace("""encoder""" , """encoder.encoder""" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
snake_case_ : int = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , __UpperCamelCase )
snake_case_ : Dict = flax_dict[key]
snake_case_ : Tuple = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
snake_case_ : Optional[int] = torch.from_numpy(converted_dict[key].T )
else:
snake_case_ : List[Any] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : List[str]=False ):
'''simple docstring'''
snake_case_ : Optional[int] = get_flax_param(__UpperCamelCase )
if not use_large:
snake_case_ : Optional[int] = PixaStructVisionConfig()
snake_case_ : Optional[Any] = PixaStructTextConfig()
else:
snake_case_ : Tuple = PixaStructVisionConfig(
hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_attention_heads=2_4 , num_hidden_layers=1_8 )
snake_case_ : List[str] = PixaStructTextConfig(hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_heads=2_4 , num_layers=1_8 )
snake_case_ : str = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__UpperCamelCase )
snake_case_ : Optional[int] = PixaStructForConditionalGeneration(__UpperCamelCase )
snake_case_ : str = rename_and_convert_flax_params(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" )
snake_case_ : int = PixaStructImageProcessor()
snake_case_ : str = PixaStructProcessor(image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase )
if use_large:
snake_case_ : Optional[Any] = 4_0_9_6
snake_case_ : int = True
# mkdir if needed
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
print("""Model saved in {}""".format(__UpperCamelCase ) )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Whether the model is a VQA model.''')
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
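# --- Illustrative sketch, not part of the original file ---
# The conversion above follows the usual Flax -> PyTorch recipe: flatten the
# nested parameter tree, rename keys through a mapping table plus a regex for
# layer indices, and transpose dense kernels, because Flax stores them as
# (in_features, out_features) while torch.nn.Linear expects (out, in).
# A tiny self-contained version of that recipe, with made-up names:
import re

import numpy as np
import torch


def convert_params(flax_params: dict, mapping: dict) -> dict:
    torch_state = {}
    for key, value in flax_params.items():
        new_key = key
        for old, new in mapping.items():
            new_key = new_key.replace(old, new)
        new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
        tensor = torch.from_numpy(np.asarray(value))
        # Embedding tables keep their layout; 2-D dense kernels are transposed.
        torch_state[new_key] = tensor if "embed" in new_key else tensor.T
    return torch_state


demo = {"encoder.layers_0.kernel": np.ones((4, 8), dtype=np.float32)}
converted = convert_params(demo, {"kernel": "weight"})
assert converted["encoder.layer.0.weight"].shape == (8, 4)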
| 58 | 1 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__lowerCAmelCase : int = get_tests_dir('''fixtures''')
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = mock.Mock()
snake_case_ : Dict = 5_0_0
snake_case_ : List[str] = {}
snake_case_ : Optional[int] = HTTPError
snake_case_ : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
snake_case_ : List[Any] = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=_lowercase ) as mock_head:
snake_case_ : List[Any] = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# This checks that we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : List[str] = WavaVecaFeatureExtractor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json""" )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase__ ( cls ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = TOKEN
HfFolder.save_token(_lowercase )
@classmethod
def UpperCAmelCase__ ( cls ) -> int:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-feature-extractor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-feature-extractor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-feature-extractor""" )
except HTTPError:
pass
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = WavaVecaFeatureExtractor.from_pretrained(_lowercase )
feature_extractor.push_to_hub("""test-feature-extractor""" , use_auth_token=self._token )
snake_case_ : Dict = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowercase , repo_id="""test-feature-extractor""" , push_to_hub=_lowercase , use_auth_token=self._token )
snake_case_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(_lowercase )
feature_extractor.push_to_hub("""valid_org/test-feature-extractor""" , use_auth_token=self._token )
snake_case_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowercase , repo_id="""valid_org/test-feature-extractor-org""" , push_to_hub=_lowercase , use_auth_token=self._token )
snake_case_ : Any = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor-org""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
snake_case_ : List[Any] = CustomFeatureExtractor.from_pretrained(_lowercase )
feature_extractor.push_to_hub("""test-dynamic-feature-extractor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor"""} , )
snake_case_ : Tuple = AutoFeatureExtractor.from_pretrained(
f'{USER}/test-dynamic-feature-extractor' , trust_remote_code=_lowercase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , """CustomFeatureExtractor""" )
| 58 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float ):
'''simple docstring'''
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(__UpperCamelCase ) * abs(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
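# Worked example (illustrative, not part of the original file): a 10 kg body
# moving at 2 m/s carries 0.5 * 10 * 2**2 = 20 J, and the sign of the velocity
# does not matter because abs() is applied to it twice:
#   __lowerCAmelCase(10, 2) == 20.0
#   __lowerCAmelCase(10, -2) == 20.0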
| 58 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCAmelCase : int = TypeVar('''KT''')
__lowerCAmelCase : Union[str, Any] = TypeVar('''VT''')
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = "root" , _lowercase = None ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = key
snake_case_ : Tuple = value
snake_case_ : list[Node[KT, VT]] = []
def __repr__( self ) -> str:
'''simple docstring'''
return f'Node({self.key}: {self.value})'
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.forward )
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = 0.5 , _lowercase = 1_6 ) -> int:
'''simple docstring'''
snake_case_ : Node[KT, VT] = Node[KT, VT]()
snake_case_ : Union[str, Any] = 0
snake_case_ : Optional[int] = p
snake_case_ : Any = max_level
def __str__( self ) -> str:
'''simple docstring'''
snake_case_ : str = list(self )
if len(_lowercase ) == 0:
return f'SkipList(level={self.level})'
snake_case_ : List[Any] = max((len(str(_lowercase ) ) for item in items) , default=4 )
snake_case_ : str = max(_lowercase , 4 ) + 4
snake_case_ : Union[str, Any] = self.head
snake_case_ : Dict = []
snake_case_ : List[str] = node.forward.copy()
lines.append(f'[{node.key}]'.ljust(_lowercase , """-""" ) + """* """ * len(_lowercase ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
while len(node.forward ) != 0:
snake_case_ : Optional[Any] = node.forward[0]
lines.append(
f'[{node.key}]'.ljust(_lowercase , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
snake_case_ : List[str] = node.forward
lines.append("""None""".ljust(_lowercase ) + """* """ * len(_lowercase ) )
return f'SkipList(level={self.level})\n' + "\n".join(_lowercase )
def __iter__( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
snake_case_ : Dict = node.forward[0]
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCAmelCase__ ( self , _lowercase ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
snake_case_ : List[Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_lowercase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
for i, update_node in enumerate(_lowercase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
snake_case_ : List[str] = node.forward[i]
else:
snake_case_ : Tuple = update_node.forward[:i]
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
snake_case_ : List[Any] = value
else:
snake_case_ : Optional[int] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , _lowercase ):
update_vector.append(self.head )
snake_case_ : Any = level
snake_case_ : Optional[int] = Node(_lowercase , _lowercase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(_lowercase )
else:
snake_case_ : Optional[Any] = new_node
def UpperCAmelCase__ ( self , _lowercase ) -> VT | None:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
return node.value
return None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 1_2 )
skip_list.insert("""Key3""" , 4_1 )
skip_list.insert("""Key4""" , -1_9 )
snake_case_ : Optional[int] = skip_list.head
snake_case_ : List[Any] = {}
while node.level != 0:
snake_case_ : List[str] = node.forward[0]
snake_case_ : Union[str, Any] = node.value
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_0 )
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 1_0 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 1_0 )
snake_case_ : str = skip_list.head
snake_case_ : str = {}
while node.level != 0:
snake_case_ : Optional[Any] = node.forward[0]
snake_case_ : int = node.value
if len(__UpperCamelCase ) != 4:
print()
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : str = SkipList()
assert skip_list.find("""Some key""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = SkipList()
skip_list.insert("""Key2""" , 2_0 )
assert skip_list.find("""Key2""" ) == 2_0
skip_list.insert("""Some Key""" , 1_0 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 1_3 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 1_0
assert skip_list.find("""V""" ) == 1_3
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Tuple = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 1_4
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4_2 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""X""" )
def traverse_keys(__UpperCamelCase : str ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(__UpperCamelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __lowerCAmelCase ( ):
'''simple docstring'''
def is_sorted(__UpperCamelCase : List[Any] ):
return all(next_item >= item for item, next_item in zip(__UpperCamelCase , lst[1:] ) )
snake_case_ : str = SkipList()
for i in range(1_0 ):
skip_list.insert(__UpperCamelCase , __UpperCamelCase )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
assert is_sorted(list(__UpperCamelCase ) )
def __lowerCAmelCase ( ):
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
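# --- Illustrative note, not part of the original file ---
# Why random_level()'s coin flips give logarithmic behaviour: a node is
# promoted to level k with probability p ** (k - 1), so the expected height of
# a list holding n keys is roughly log base (1/p) of n. For the default
# p = 0.5 and one million keys that is about 20 levels:
import math

p, n = 0.5, 1_000_000
assert round(math.log(n, 1 / p)) == 20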
| 58 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableDiffusionInpaintPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCamelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCamelCase = frozenset([] )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=_lowercase , )
snake_case_ : Dict = PNDMScheduler(skip_prk_steps=_lowercase )
torch.manual_seed(0 )
snake_case_ : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
snake_case_ : Dict = CLIPTextModel(_lowercase )
snake_case_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self , _lowercase , _lowercase=0 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowercase ) ).to(_lowercase )
snake_case_ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Tuple = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((6_4, 6_4) )
snake_case_ : Any = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
if str(_lowercase ).startswith("""mps""" ):
snake_case_ : str = torch.manual_seed(_lowercase )
else:
snake_case_ : List[str] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
snake_case_ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ : List[str] = self.get_dummy_components()
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline(**_lowercase )
snake_case_ : Dict = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Optional[int] = self.get_dummy_inputs(_lowercase )
snake_case_ : List[str] = sd_pipe(**_lowercase ).images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ : Optional[int] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[str] = torch.manual_seed(0 )
snake_case_ : Dict = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , output_type="""np""" , )
snake_case_ : Tuple = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowercase , torch_dtype=torch.floataa , safety_checker=_lowercase , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case_ : Union[str, Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : Optional[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , output_type="""np""" , )
snake_case_ : str = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : List[str] = PNDMScheduler.from_pretrained(_lowercase , subfolder="""scheduler""" )
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowercase , safety_checker=_lowercase , scheduler=_lowercase , torch_dtype=torch.floataa , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case_ : Any = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
snake_case_ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
| 58 | 1 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''pixel_values''']
def __init__( self , _lowercase = True , _lowercase = 1 / 2_5_5 , _lowercase = True , _lowercase = 8 , **_lowercase , ) -> None:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Any = do_rescale
snake_case_ : List[str] = rescale_factor
snake_case_ : Optional[int] = do_pad
snake_case_ : List[str] = pad_size
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase = None , **_lowercase ) -> np.ndarray:
'''simple docstring'''
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase = None ) -> List[Any]:
'''simple docstring'''
snake_case_ , snake_case_ : Any = get_image_size(_lowercase )
snake_case_ : List[str] = (old_height // size + 1) * size - old_height
snake_case_ : int = (old_width // size + 1) * size - old_width
return pad(_lowercase , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = ChannelDimension.FIRST , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : Union[str, Any] = do_pad if do_pad is not None else self.do_pad
snake_case_ : int = pad_size if pad_size is not None else self.pad_size
snake_case_ : Optional[Any] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
snake_case_ : Optional[int] = [to_numpy_array(_lowercase ) for image in images]
if do_rescale:
snake_case_ : Any = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_pad:
snake_case_ : Union[str, Any] = [self.pad(_lowercase , size=_lowercase ) for image in images]
snake_case_ : int = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
snake_case_ : str = {"""pixel_values""": images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
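# --- Illustrative note, not part of the original file ---
# The pad() method above grows each spatial dimension to the next multiple of
# `size` via (old // size + 1) * size - old. As written it always adds at
# least one row/column: a dimension that is already a multiple of `size` still
# receives a full extra block of symmetric padding.
size = 8
assert (250 // size + 1) * size - 250 == 6   # 250 -> 256
assert (256 // size + 1) * size - 256 == 8   # 256 -> 264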
| 58 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Optional[Any] = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
snake_case_ : Optional[int] = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
snake_case_ : Optional[Any] = F'{src_lang}-{tgt_lang}'
snake_case_ : Dict = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
snake_case_ : List[str] = os.path.join(__UpperCamelCase , """README.md""" )
print(F'Generating {path}' )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
# make sure we are under the root of the project
__lowerCAmelCase : str = Path(__file__).resolve().parent.parent.parent
__lowerCAmelCase : Optional[int] = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = model_name.split('''-''')
__lowerCAmelCase : Optional[int] = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 58 | 1 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
__lowerCAmelCase : Any = logging.getLogger()
def __lowerCAmelCase ( __UpperCamelCase : Path , __UpperCamelCase : list ):
'''simple docstring'''
snake_case_ : List[Any] = """\n""".join(__UpperCamelCase )
Path(__UpperCamelCase ).open("""w""" ).writelines(__UpperCamelCase )
__lowerCAmelCase : Dict = '''patrickvonplaten/t5-tiny-random'''
__lowerCAmelCase : List[str] = '''sshleifer/bart-tiny-random'''
__lowerCAmelCase : List[str] = '''sshleifer/tiny-mbart'''
__lowerCAmelCase : List[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
snake_case_ : Optional[Any] = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
snake_case_ : Any = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(_lowercase , _lowercase )
snake_case_ : Any = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
snake_case_ : int = """translation_en_to_de""" if model == T5_TINY else """summarization"""
snake_case_ : List[str] = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(_lowercase , """argv""" , _lowercase ):
run_generate()
assert Path(_lowercase ).exists()
# os.remove(Path(output_file_name))
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
self.run_eval_tester(_lowercase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def UpperCAmelCase__ ( self , _lowercase ) -> Dict:
'''simple docstring'''
self.run_eval_tester(_lowercase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def UpperCAmelCase__ ( self , _lowercase ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
snake_case_ : Dict = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
snake_case_ : Optional[int] = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
snake_case_ : Dict = Path(self.get_auto_remove_tmp_dir() )
snake_case_ : int = str(tmp_dir / """scores.json""" )
snake_case_ : List[str] = str(tmp_dir / """val.target""" )
_dump_articles(_lowercase , text["""en"""] )
_dump_articles(_lowercase , text["""de"""] )
snake_case_ : Optional[int] = """translation_en_to_de""" if model == T5_TINY else """summarization"""
snake_case_ : Optional[Any] = f'\n run_eval_search.py\n {model}\n {str(_lowercase )}\n {str(_lowercase )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(_lowercase , """argv""" , _lowercase ):
with CaptureStdout() as cs:
run_search()
snake_case_ : Optional[Any] = [""" num_beams | length_penalty""", model, """Best score args"""]
snake_case_ : Any = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(_lowercase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(_lowercase ).exists()
os.remove(Path(_lowercase ) )
| 58 |
"""simple docstring"""
__lowerCAmelCase : Tuple = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowerCAmelCase : Tuple = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowerCAmelCase : Any = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
if "img_encoder.pos_embed" in name:
snake_case_ : List[str] = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
snake_case_ : Optional[int] = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
snake_case_ : Optional[int] = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
snake_case_ : List[str] = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
snake_case_ : List[str] = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
snake_case_ : Dict = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
snake_case_ : List[str] = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
snake_case_ : Dict = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
snake_case_ : Tuple = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
snake_case_ : List[Any] = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
snake_case_ : Optional[int] = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
snake_case_ : List[Any] = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
snake_case_ : Dict = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
snake_case_ : Optional[int] = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
snake_case_ : Tuple = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
snake_case_ : Tuple = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
snake_case_ : Tuple = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
snake_case_ : List[str] = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
snake_case_ : Dict = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
snake_case_ : Tuple = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
snake_case_ : List[Any] = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
snake_case_ : Dict = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
snake_case_ : Optional[Any] = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
snake_case_ : int = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case_ : str = orig_state_dict.pop(__UpperCamelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ : Any = key.split(""".""" )
snake_case_ , snake_case_ : Union[str, Any] = int(key_split[2] ), int(key_split[4] )
snake_case_ : Dict = config.vision_config.hidden_size
if "weight" in key:
snake_case_ : str = val[:dim, :]
snake_case_ : Optional[int] = val[dim : dim * 2, :]
snake_case_ : Optional[int] = val[-dim:, :]
else:
snake_case_ : List[str] = val[:dim]
snake_case_ : Optional[Any] = val[dim : dim * 2]
snake_case_ : Dict = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ : Dict = key.split(""".""" )
snake_case_ : str = int(key_split[3] )
snake_case_ : List[str] = config.text_config.hidden_size
if "weight" in key:
snake_case_ : Optional[Any] = val[:dim, :]
snake_case_ : List[str] = val[
dim : dim * 2, :
]
snake_case_ : Tuple = val[-dim:, :]
else:
snake_case_ : List[str] = val[:dim]
snake_case_ : Tuple = val[dim : dim * 2]
snake_case_ : Tuple = val[-dim:]
else:
snake_case_ : List[str] = rename_key(__UpperCamelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
snake_case_ : Any = val.squeeze_()
else:
snake_case_ : Any = val
return orig_state_dict
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ : Dict = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int]="groupvit-gcc-yfcc" , __UpperCamelCase : str=False ):
'''simple docstring'''
snake_case_ : Any = GroupViTConfig()
snake_case_ : Union[str, Any] = GroupViTModel(__UpperCamelCase ).eval()
snake_case_ : Optional[int] = torch.load(__UpperCamelCase , map_location="""cpu""" )["""model"""]
snake_case_ : List[Any] = convert_state_dict(__UpperCamelCase , __UpperCamelCase )
snake_case_ , snake_case_ : int = model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__UpperCamelCase ) == 0)
# verify result
snake_case_ : List[str] = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
snake_case_ : List[str] = prepare_img()
snake_case_ : Optional[Any] = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=__UpperCamelCase , padding=__UpperCamelCase , return_tensors="""pt""" )
with torch.no_grad():
snake_case_ : Dict = model(**__UpperCamelCase )
if model_name == "groupvit-gcc-yfcc":
snake_case_ : Union[str, Any] = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
snake_case_ : Any = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , __UpperCamelCase , atol=1E-3 )
processor.save_pretrained(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
print("""Successfully saved processor and model to""" , __UpperCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(__UpperCamelCase , organization="""nielsr""" )
model.push_to_hub(__UpperCamelCase , organization="""nielsr""" )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
    default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
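# Illustrative sketch (toy tensor and a hypothetical `dim`, not part of the original script): the
# "qkv"/"in_proj" branches in convert_state_dict above split one fused projection of shape
# (3*dim, dim) into separate query/key/value weights using exactly this slicing.
def _split_qkv_demo(dim: int = 4):
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query = fused[:dim, :]
    key = fused[dim : dim * 2, :]
    value = fused[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)
    return query, key, value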
| 58 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
__lowerCAmelCase : Tuple = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__lowerCAmelCase : Union[str, Any] = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
__lowerCAmelCase : Optional[int] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=False ) -> Optional[Any]:
'''simple docstring'''
if concatenate_texts:
return compute_measures(_lowercase , _lowercase )["wer"]
else:
snake_case_ : List[str] = 0
snake_case_ : Optional[int] = 0
for prediction, reference in zip(_lowercase , _lowercase ):
snake_case_ : Optional[Any] = compute_measures(_lowercase , _lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
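# Illustrative sketch (the sentence pair is made up, not part of the metric): a hand-worked WER
# following WER = (S + D + I) / N from the description above, using the same jiwer
# compute_measures call as the metric class.
def _wer_by_hand_demo():
    reference = "the cat sat on the mat"  # N = 6 reference words
    prediction = "the cat sit on mat"     # 1 substitution ("sat" -> "sit"), 1 deletion ("the")
    measures = compute_measures(reference, prediction)
    errors = measures["substitutions"] + measures["deletions"] + measures["insertions"]
    total = measures["substitutions"] + measures["deletions"] + measures["hits"]
    return errors / total  # (1 + 1 + 0) / 6 ≈ 0.33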
| 58 | 1 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__lowerCAmelCase : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__lowerCAmelCase : List[Any] = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
snake_case_ : str = self.transformer_dir
shutil.copy(
os.path.join(_lowercase , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase=None ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
snake_case_ : str = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
snake_case_ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
snake_case_ : Any = black.format_str(_lowercase , mode=_lowercase )
snake_case_ : Tuple = os.path.join(self.transformer_dir , """new_code.py""" )
with open(_lowercase , """w""" , newline="""\n""" ) as f:
f.write(_lowercase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_lowercase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_lowercase )
with open(_lowercase , """r""" ) as f:
self.assertTrue(f.read() , _lowercase )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(_lowercase , _lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _lowercase , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _lowercase ) , )
# Copy consistency with a really long name
snake_case_ : Any = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , f'{long_class_name}LMPredictionHead' , re.sub("""Bert""" , _lowercase , _lowercase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _lowercase , overwrite_result=re.sub("""Bert""" , """TestModel""" , _lowercase ) , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
snake_case_ : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
snake_case_ : List[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
snake_case_ : Optional[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
snake_case_ , snake_case_ : Optional[Any] = check_copies.convert_to_localized_md(
_lowercase , _lowercase , localized_readme["""format_model_list"""] )
self.assertFalse(_lowercase )
self.assertEqual(_lowercase , _lowercase )
snake_case_ , snake_case_ : List[Any] = check_copies.convert_to_localized_md(
_lowercase , _lowercase , localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(_lowercase )
snake_case_ : Union[str, Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
snake_case_ : Optional[int] = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
snake_case_ : Optional[int] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
snake_case_ , snake_case_ : Optional[Any] = check_copies.convert_to_localized_md(
_lowercase , _lowercase , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(_lowercase , _lowercase )
| 58 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=3 , _lowercase=2_2_4 , _lowercase=3_0 , _lowercase=4_0_0 , _lowercase=True , _lowercase=None , _lowercase=True , _lowercase=[0.5, 0.5, 0.5] , _lowercase=[0.5, 0.5, 0.5] , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case_ : Union[str, Any] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Dict = num_channels
snake_case_ : Optional[Any] = image_size
snake_case_ : Optional[Any] = min_resolution
snake_case_ : List[Any] = max_resolution
snake_case_ : Union[str, Any] = do_resize
snake_case_ : Optional[int] = size
snake_case_ : Optional[Any] = do_normalize
snake_case_ : int = image_mean
snake_case_ : Dict = image_std
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = ViTImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = EfficientFormerImageProcessorTester(self )
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """image_mean""" ) )
self.assertTrue(hasattr(_lowercase , """image_std""" ) )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Optional[Any] = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
snake_case_ : int = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : int = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
snake_case_ : Dict = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Tuple = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 58 | 1 |
"""simple docstring"""
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
pass
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
pass
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = [
[],
[],
[],
]
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> None:
'''simple docstring'''
try:
if len(self.queues[priority] ) >= 1_0_0:
raise OverflowError("""Maximum queue size is 100""" )
self.queues[priority].append(_lowercase )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self ) -> str:
'''simple docstring'''
return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = []
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
if len(self.queue ) == 1_0_0:
raise OverFlowError("""Maximum queue size is 100""" )
self.queue.append(_lowercase )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError("""The queue is empty""" )
else:
snake_case_ : Any = min(self.queue )
self.queue.remove(_lowercase )
return data
def __str__( self ) -> str:
'''simple docstring'''
return str(self.queue )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = FixedPriorityQueue()
fpq.enqueue(0 , 1_0 )
fpq.enqueue(1 , 7_0 )
fpq.enqueue(0 , 1_0_0 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 6_4 )
fpq.enqueue(0 , 1_2_8 )
print(__UpperCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(__UpperCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = ElementPriorityQueue()
epq.enqueue(1_0 )
epq.enqueue(7_0 )
epq.enqueue(1_0_0 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(6_4 )
epq.enqueue(1_2_8 )
print(__UpperCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(__UpperCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 58 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCAmelCase : int = TypeVar('''KT''')
__lowerCAmelCase : Union[str, Any] = TypeVar('''VT''')
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = "root" , _lowercase = None ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = key
snake_case_ : Tuple = value
snake_case_ : list[Node[KT, VT]] = []
def __repr__( self ) -> str:
'''simple docstring'''
return f'Node({self.key}: {self.value})'
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.forward )
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = 0.5 , _lowercase = 1_6 ) -> int:
'''simple docstring'''
snake_case_ : Node[KT, VT] = Node[KT, VT]()
snake_case_ : Union[str, Any] = 0
snake_case_ : Optional[int] = p
snake_case_ : Any = max_level
def __str__( self ) -> str:
'''simple docstring'''
snake_case_ : str = list(self )
if len(_lowercase ) == 0:
return f'SkipList(level={self.level})'
snake_case_ : List[Any] = max((len(str(_lowercase ) ) for item in items) , default=4 )
snake_case_ : str = max(_lowercase , 4 ) + 4
snake_case_ : Union[str, Any] = self.head
snake_case_ : Dict = []
snake_case_ : List[str] = node.forward.copy()
lines.append(f'[{node.key}]'.ljust(_lowercase , """-""" ) + """* """ * len(_lowercase ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
while len(node.forward ) != 0:
snake_case_ : Optional[Any] = node.forward[0]
lines.append(
f'[{node.key}]'.ljust(_lowercase , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
snake_case_ : List[str] = node.forward
lines.append("""None""".ljust(_lowercase ) + """* """ * len(_lowercase ) )
return f'SkipList(level={self.level})\n' + "\n".join(_lowercase )
def __iter__( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
snake_case_ : Dict = node.forward[0]
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCAmelCase__ ( self , _lowercase ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
snake_case_ : List[Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_lowercase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
for i, update_node in enumerate(_lowercase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
snake_case_ : List[str] = node.forward[i]
else:
snake_case_ : Tuple = update_node.forward[:i]
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
snake_case_ : List[Any] = value
else:
snake_case_ : Optional[int] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , _lowercase ):
update_vector.append(self.head )
snake_case_ : Any = level
snake_case_ : Optional[int] = Node(_lowercase , _lowercase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(_lowercase )
else:
snake_case_ : Optional[Any] = new_node
def UpperCAmelCase__ ( self , _lowercase ) -> VT | None:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
return node.value
return None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 1_2 )
skip_list.insert("""Key3""" , 4_1 )
skip_list.insert("""Key4""" , -1_9 )
snake_case_ : Optional[int] = skip_list.head
snake_case_ : List[Any] = {}
while node.level != 0:
snake_case_ : List[str] = node.forward[0]
snake_case_ : Union[str, Any] = node.value
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_0 )
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 1_0 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 1_0 )
snake_case_ : str = skip_list.head
snake_case_ : str = {}
while node.level != 0:
snake_case_ : Optional[Any] = node.forward[0]
snake_case_ : int = node.value
if len(__UpperCamelCase ) != 4:
print()
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : str = SkipList()
assert skip_list.find("""Some key""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = SkipList()
skip_list.insert("""Key2""" , 2_0 )
assert skip_list.find("""Key2""" ) == 2_0
skip_list.insert("""Some Key""" , 1_0 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 1_3 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 1_0
assert skip_list.find("""V""" ) == 1_3
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Tuple = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 1_4
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4_2 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""X""" )
def traverse_keys(__UpperCamelCase : str ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(__UpperCamelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __lowerCAmelCase ( ):
'''simple docstring'''
def is_sorted(__UpperCamelCase : List[Any] ):
return all(next_item >= item for item, next_item in zip(__UpperCamelCase , lst[1:] ) )
snake_case_ : str = SkipList()
for i in range(1_0 ):
skip_list.insert(__UpperCamelCase , __UpperCamelCase )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
assert is_sorted(list(__UpperCamelCase ) )
def __lowerCAmelCase ( ):
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
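# Illustrative sketch (not part of the original module): with the default p=0.5, random_level()
# is geometrically distributed, so roughly half of the inserted nodes get level 1, a quarter
# level 2, and so on -- the property behind the skip list's expected O(log n) search cost.
def _level_histogram_demo(samples: int = 10_000) -> dict[int, int]:
    skip_list = SkipList()
    counts: dict[int, int] = {}
    for _ in range(samples):
        level = skip_list.random_level()
        counts[level] = counts.get(level, 0) + 1
    return dict(sorted(counts.items()))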
| 58 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
__lowerCAmelCase : Union[str, Any] = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
__lowerCAmelCase : Dict = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[Any] = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
snake_case_ : List[Any] = bs[:]
snake_case_ : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
snake_case_ : Union[str, Any] = [chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : List[str] = set()
snake_case_ : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case_ : List[str] = char
return pairs
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowercase , _lowercase , _lowercase="replace" , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase=False , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
snake_case_ : Union[str, Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
snake_case_ : List[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
snake_case_ : Union[str, Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
snake_case_ : str = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
snake_case_ : Dict = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ : str = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
with open(_lowercase , encoding="""utf-8""" ) as vocab_handle:
snake_case_ : Dict = json.load(_lowercase )
snake_case_ : Optional[int] = {v: k for k, v in self.encoder.items()}
snake_case_ : Optional[Any] = errors # how to handle errors in decoding
snake_case_ : str = bytes_to_unicode()
snake_case_ : Any = {v: k for k, v in self.byte_encoder.items()}
with open(_lowercase , encoding="""utf-8""" ) as merges_handle:
snake_case_ : Dict = merges_handle.read().split("""\n""" )[1:-1]
snake_case_ : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
snake_case_ : str = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case_ : List[str] = {}
snake_case_ : List[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case_ : Dict = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
snake_case_ : Union[str, Any] = tuple(_lowercase )
snake_case_ : Optional[int] = get_pairs(_lowercase )
if not pairs:
return token
while True:
snake_case_ : Tuple = min(_lowercase , key=lambda _lowercase : self.bpe_ranks.get(_lowercase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case_ , snake_case_ : str = bigram
snake_case_ : List[Any] = []
snake_case_ : str = 0
while i < len(_lowercase ):
try:
snake_case_ : Optional[Any] = word.index(_lowercase , _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case_ : Dict = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case_ : Optional[int] = tuple(_lowercase )
snake_case_ : Any = new_word
if len(_lowercase ) == 1:
break
else:
snake_case_ : Any = get_pairs(_lowercase )
snake_case_ : List[Any] = """ """.join(_lowercase )
snake_case_ : int = word
return word
def UpperCAmelCase__ ( self , _lowercase ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = []
for token in re.findall(self.pat , _lowercase ):
snake_case_ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowercase ).split(""" """ ) )
return bpe_tokens
def UpperCAmelCase__ ( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def UpperCAmelCase__ ( self , _lowercase ) -> List[str]:
'''simple docstring'''
return self.decoder.get(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = """""".join(_lowercase )
snake_case_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case_ : str = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ : Union[str, Any] = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + """\n""" )
snake_case_ : str = 0
with open(_lowercase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowercase : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
snake_case_ : List[Any] = token_index
writer.write(""" """.join(_lowercase ) + """\n""" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ : int = [self.cls_token_id]
snake_case_ : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : Tuple = [self.sep_token_id]
snake_case_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self , _lowercase , _lowercase=False , **_lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowercase ) > 0 and not text[0].isspace()):
snake_case_ : List[str] = """ """ + text
return (text, kwargs)
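# Illustrative sketch (toy word and toy merge ranks, not part of the tokenizer): a single round
# of the BPE loop above -- take the pair with the best (lowest) rank that occurs in the word and
# merge every occurrence of it.
def _bpe_single_merge_demo():
    word = ("l", "o", "w", "e", "r")
    bpe_ranks = {("l", "o"): 0, ("e", "r"): 1}  # lower rank = merged earlier
    pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
    first, second = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            merged.append(first + second)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    return tuple(merged)  # -> ("lo", "w", "e", "r")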
| 58 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[Any] = '''examples/'''
__lowerCAmelCase : Union[str, Any] = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__lowerCAmelCase : Union[str, Any] = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
__lowerCAmelCase : List[Any] = '''README.md'''
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict ):
'''simple docstring'''
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : Any = f.read()
snake_case_ , snake_case_ : Optional[int] = REPLACE_PATTERNS[pattern]
snake_case_ : Union[str, Any] = replace.replace("""VERSION""" , __UpperCamelCase )
snake_case_ : List[Any] = re_pattern.sub(__UpperCamelCase , __UpperCamelCase )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(__UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , pattern="""examples""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : int=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not patch:
update_version_in_examples(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = """🤗 Transformers currently provides the following architectures"""
snake_case_ : Union[str, Any] = """1. Want to contribute a new model?"""
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : str = f.readlines()
# Find the start of the list.
snake_case_ : List[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
snake_case_ : Optional[int] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
snake_case_ : Any = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
snake_case_ : Any = f.read()
snake_case_ : Tuple = REPLACE_PATTERNS["""init"""][0].search(__UpperCamelCase ).groups()[0]
return packaging.version.parse(__UpperCamelCase )
def pre_release_work( patch=False ):
    '''simple docstring'''
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = F'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(F'Which version are you releasing? [{default_version}]' )
    if len(version ) == 0:
        version = default_version
    print(F'Updating version to {version}.' )
    global_version_update(version , patch=patch )
def post_release_work():
    '''simple docstring'''
    current_version = get_version()
    dev_version = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'Which version are we developing now? [{dev_version}]' )
    if len(version ) == 0:
        version = dev_version
    print(F'Updating version to {version}.' )
    global_version_update(version )
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
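# Added illustration (not part of the release script): a minimal sketch of what the "init"
# pattern above does to a version line; the sample text and target version are made up.
def _demo_init_pattern_substitution():
    sample = '__version__ = "0.19.0.dev0"\n'
    re_pattern, replace = REPLACE_PATTERNS["init"]
    # mirrors update_version_in_file: fill the VERSION placeholder, then substitute it into the text
    return re_pattern.sub(replace.replace("VERSION", "0.19.0"), sample)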
| 58 | 1 |
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
__lowerCAmelCase : List[Any] = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
__lowerCAmelCase : List[Any] = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
__lowerCAmelCase : Any = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each prediction should be a predicted label (or a regression score for the stsb subset).
    references: list of references for each prediction.
        Each reference should be the corresponding ground-truth label (or score for stsb).
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy( preds , labels ):
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_fa( preds , labels ):
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    fa = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman( preds , labels ):
    '''simple docstring'''
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(_lowercase , _lowercase )}
elif self.config_name == "stsb":
return pearson_and_spearman(_lowercase , _lowercase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(_lowercase , _lowercase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 58 |
"""simple docstring"""
def speed_of_sound_in_a_fluid( density : float , bulk_modulus : float ):
'''simple docstring'''
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
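    # Added illustration (hypothetical values): for water-like parameters, density ~998 kg/m^3
    # and bulk modulus ~2.15e9 Pa, the speed of sound comes out to roughly 1.47e3 m/s.
    print(speed_of_sound_in_a_fluid(density=998.0 , bulk_modulus=2.15e9 ) )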
| 58 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''vit'''
def __init__( self , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase=2_2_4 , _lowercase=1_6 , _lowercase=3 , _lowercase=True , _lowercase=1_6 , **_lowercase , ) -> int:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : Dict = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : List[Any] = intermediate_size
snake_case_ : str = hidden_act
snake_case_ : Any = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : List[Any] = initializer_range
snake_case_ : List[Any] = layer_norm_eps
snake_case_ : int = image_size
snake_case_ : Tuple = patch_size
snake_case_ : List[str] = num_channels
snake_case_ : Optional[Any] = qkv_bias
snake_case_ : Optional[Any] = encoder_stride
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-4
| 58 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi( precision : int ):
    '''simple docstring'''
    if not isinstance(precision , int ):
        raise TypeError("""Undefined for non-integers""" )
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""" )
    getcontext().prec = precision
    num_iterations = ceil(precision / 1_4 )
    constant_term = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
    exponential_term = 1
    linear_term = 1_3_5_9_1_4_0_9
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 5_4_5_1_4_0_1_3_4
        exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    n = 50
    print(F'''The first {n} digits of pi are: {pi(n)}''')
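    # Added note: with a small precision the same routine should give, e.g., pi(10) == "3.14159265";
    # the final computed digit is dropped (the [:-1] above) to guard against rounding in the last place.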
| 58 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch( openai_checkpoint_folder_path , openai_config_file , pytorch_dump_folder_path ):
    '''simple docstring'''
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file )
    model = OpenAIGPTModel(config )
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model , config , openai_checkpoint_folder_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
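# Added illustration (hypothetical paths and script name): the converter above is typically run as
#   python convert_openai_checkpoint.py \
#       --openai_checkpoint_folder_path /path/to/tf/checkpoint \
#       --pytorch_dump_folder_path /path/to/output/dir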
| 58 |
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy( x ):
    '''simple docstring'''
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 ) # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
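# Added note: for logits x with A = sum_i exp(x_i) and B = sum_i x_i * exp(x_i), the softmax
# probabilities are p_i = exp(x_i) / A, so the entropy -sum_i p_i * log(p_i) simplifies to
# log(A) - B / A, which is what the function above returns for each row of the batch.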
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> int:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = config.output_attentions
snake_case_ : str = config.output_hidden_states
snake_case_ : List[str] = nn.ModuleList([BertLayer(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Tuple = nn.ModuleList([BertHighway(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Any = [-1 for _ in range(config.num_hidden_layers )]
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
if (type(_lowercase ) is float) or (type(_lowercase ) is int):
for i in range(len(self.early_exit_entropy ) ):
snake_case_ : Dict = x
else:
snake_case_ : Union[str, Any] = x
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Any:
'''simple docstring'''
snake_case_ : str = ()
snake_case_ : str = ()
snake_case_ : List[str] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
snake_case_ : int = all_hidden_states + (hidden_states,)
snake_case_ : Any = layer_module(
_lowercase , _lowercase , head_mask[i] , _lowercase , _lowercase )
snake_case_ : Dict = layer_outputs[0]
if self.output_attentions:
snake_case_ : str = all_attentions + (layer_outputs[1],)
snake_case_ : Optional[int] = (hidden_states,)
if self.output_hidden_states:
snake_case_ : Tuple = current_outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : int = current_outputs + (all_attentions,)
snake_case_ : Optional[Any] = self.highway[i](_lowercase )
# logits, pooled_output
if not self.training:
snake_case_ : Tuple = highway_exit[0]
snake_case_ : List[str] = entropy(_lowercase )
snake_case_ : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
snake_case_ : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
snake_case_ : List[Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_lowercase , i + 1 )
else:
snake_case_ : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
snake_case_ : Dict = all_hidden_states + (hidden_states,)
snake_case_ : str = (hidden_states,)
if self.output_hidden_states:
snake_case_ : List[Any] = outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : Union[str, Any] = outputs + (all_attentions,)
snake_case_ : List[str] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config
snake_case_ : int = BertEmbeddings(_lowercase )
snake_case_ : Tuple = DeeBertEncoder(_lowercase )
snake_case_ : int = BertPooler(_lowercase )
self.init_weights()
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return self.embeddings.word_embeddings
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = value
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_lowercase )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
snake_case_ : Dict = input_ids.size()
elif inputs_embeds is not None:
snake_case_ : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
snake_case_ : int = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case_ : Dict = torch.ones(_lowercase , device=_lowercase )
if encoder_attention_mask is None:
snake_case_ : Tuple = torch.ones(_lowercase , device=_lowercase )
if token_type_ids is None:
snake_case_ : Any = torch.zeros(_lowercase , dtype=torch.long , device=_lowercase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case_ : torch.Tensor = self.get_extended_attention_mask(_lowercase , _lowercase , _lowercase )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
snake_case_ : List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
snake_case_ : Any = encoder_attention_mask[:, None, None, :]
snake_case_ : List[str] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
snake_case_ : List[str] = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case_ : int = self.get_head_mask(_lowercase , self.config.num_hidden_layers )
snake_case_ : List[str] = self.embeddings(
input_ids=_lowercase , position_ids=_lowercase , token_type_ids=_lowercase , inputs_embeds=_lowercase )
snake_case_ : List[str] = self.encoder(
_lowercase , attention_mask=_lowercase , head_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )
snake_case_ : Optional[Any] = encoder_outputs[0]
snake_case_ : Union[str, Any] = self.pooler(_lowercase )
snake_case_ : Optional[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = message
snake_case_ : str = exit_layer # start from 1!
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : str = BertPooler(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Dict = nn.Linear(config.hidden_size , config.num_labels )
def UpperCAmelCase__ ( self , _lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = encoder_outputs[0]
snake_case_ : List[Any] = self.pooler(_lowercase )
# "return" pooler_output
# BertModel
snake_case_ : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
snake_case_ : Union[str, Any] = bmodel_output[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : List[str] = self.classifier(_lowercase )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config.num_labels
snake_case_ : Tuple = config.num_hidden_layers
snake_case_ : Any = DeeBertModel(_lowercase )
snake_case_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Tuple = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> int:
'''simple docstring'''
snake_case_ : int = self.num_layers
try:
snake_case_ : Any = self.bert(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
snake_case_ : str = outputs[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : Optional[int] = e.message
snake_case_ : Dict = e.exit_layer
snake_case_ : Optional[Any] = outputs[0]
if not self.training:
snake_case_ : int = entropy(_lowercase )
snake_case_ : int = []
snake_case_ : List[str] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : Dict = []
for highway_exit in outputs[-1]:
snake_case_ : List[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : List[Any] = MSELoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : str = (loss,) + outputs
if not self.training:
snake_case_ : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : str = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 58 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 58 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function( z ):
    '''simple docstring'''
    return 1 / (1 + np.exp(-z ))
def cost_function( h , y ):
    '''simple docstring'''
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood( x , y , weights ):
    '''simple docstring'''
    scores = np.dot(x , weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
def logistic_reg( alpha , x , y , max_iterations=7_0_0_0_0 ):
    '''simple docstring'''
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T , h - y ) / y.size
        theta = theta - alpha * gradient # updating the weights
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        j = cost_function(h , y )
        if iterations % 1_0_0 == 0:
            print(F'loss: {j} \t' ) # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=7_0000)
    print('''theta: ''', theta) # printing the theta, i.e. our weight vector

    def predict_prob(x):
        '''simple docstring'''
        return sigmoid_function(
            np.dot(x, theta) ) # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
    (xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
    (xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
    (xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
    grid = np.c_[xxa.ravel(), xxb.ravel()]
    probs = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors='''black''')
    plt.legend()
    plt.show()
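# Added note: the weight update inside logistic_reg above is plain gradient descent on the
# cross-entropy loss J(theta); its gradient is X^T (sigmoid(X theta) - y) / m for m samples,
# which is exactly the `gradient` term computed in the training loop.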
| 58 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''gpt_neox'''
def __init__( self , _lowercase=5_0_4_3_2 , _lowercase=6_1_4_4 , _lowercase=4_4 , _lowercase=6_4 , _lowercase=2_4_5_7_6 , _lowercase="gelu" , _lowercase=0.25 , _lowercase=1_0_0_0_0 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase=2_0_4_8 , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=True , _lowercase=0 , _lowercase=2 , _lowercase=False , _lowercase=True , _lowercase=None , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
snake_case_ : Any = vocab_size
snake_case_ : int = max_position_embeddings
snake_case_ : Tuple = hidden_size
snake_case_ : Optional[int] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : Dict = hidden_act
snake_case_ : int = rotary_pct
snake_case_ : Tuple = rotary_emb_base
snake_case_ : Union[str, Any] = attention_dropout
snake_case_ : Optional[int] = hidden_dropout
snake_case_ : Tuple = classifier_dropout
snake_case_ : str = initializer_range
snake_case_ : List[str] = layer_norm_eps
snake_case_ : Dict = use_cache
snake_case_ : str = tie_word_embeddings
snake_case_ : Optional[Any] = use_parallel_residual
snake_case_ : Any = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"""The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _lowercase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'got {self.rope_scaling}' )
snake_case_ : Dict = self.rope_scaling.get("""type""" , _lowercase )
snake_case_ : str = self.rope_scaling.get("""factor""" , _lowercase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(_lowercase , _lowercase ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
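        # Added illustration: a value that passes the validation above would be, for example,
        # rope_scaling={"type": "linear", "factor": 2.0} passed to the config constructor.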
| 58 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__lowerCAmelCase : Tuple = '''scheduler_config.json'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 1
_lowerCamelCase = 2
_lowerCamelCase = 3
_lowerCamelCase = 4
_lowerCamelCase = 5
@dataclass
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = SCHEDULER_CONFIG_NAME
_lowerCamelCase = ['''dtype''']
_lowerCamelCase = []
_lowerCamelCase = True
@classmethod
def UpperCAmelCase__ ( cls , _lowercase = None , _lowercase = None , _lowercase=False , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ : int = cls.load_config(
pretrained_model_name_or_path=_lowercase , subfolder=_lowercase , return_unused_kwargs=_lowercase , **_lowercase , )
snake_case_ , snake_case_ : Dict = cls.from_config(_lowercase , return_unused_kwargs=_lowercase , **_lowercase )
if hasattr(_lowercase , """create_state""" ) and getattr(_lowercase , """has_state""" , _lowercase ):
snake_case_ : Any = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase__ ( self , _lowercase , _lowercase = False , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
self.save_config(save_directory=_lowercase , push_to_hub=_lowercase , **_lowercase )
@property
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def UpperCAmelCase__ ( cls ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) )
snake_case_ : str = importlib.import_module(__name__.split(""".""" )[0] )
snake_case_ : Optional[int] = [
getattr(_lowercase , _lowercase ) for c in compatible_classes_str if hasattr(_lowercase , _lowercase )
]
return compatible_classes
def broadcast_to_shape_from_left( x : jnp.ndarray , shape : Tuple[int] ):
    '''simple docstring'''
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def betas_for_alpha_bar( num_diffusion_timesteps : int , max_beta=0.999 , dtype=jnp.float32 ):
    '''simple docstring'''
    def alpha_bar(time_step ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
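# Added note: betas_for_alpha_bar implements the cosine noise schedule from Nichol & Dhariwal's
# "Improved Denoising Diffusion Probabilistic Models": alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2
# over normalized timesteps, and beta_i = min(1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), max_beta).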
@flax.struct.dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
@classmethod
def UpperCAmelCase__ ( cls , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : Any = scheduler.config
if config.trained_betas is not None:
snake_case_ : Optional[Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
snake_case_ : int = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
snake_case_ : str = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
snake_case_ : int = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
snake_case_ : Optional[Any] = 1.0 - betas
snake_case_ : Any = jnp.cumprod(_lowercase , axis=0 )
return cls(
alphas=_lowercase , betas=_lowercase , alphas_cumprod=_lowercase , )
def get_sqrt_alpha_prod( state : CommonSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray ):
    '''simple docstring'''
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common( state : CommonSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray ):
    '''simple docstring'''
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common( state : CommonSchedulerState , sample : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray ):
    '''simple docstring'''
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
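# Added note: get_velocity_common computes the v-prediction target v = sqrt(alpha_bar_t) * noise
# - sqrt(1 - alpha_bar_t) * sample, as introduced for progressive distillation (Salimans & Ho, 2022),
# reusing the same broadcasted coefficients as add_noise_common.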
| 58 | 1 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase = None , _lowercase = None , _lowercase=None , _lowercase=None ) -> Optional[Any]:
'''simple docstring'''
if not conversation_id:
snake_case_ : Tuple = uuid.uuida()
if past_user_inputs is None:
snake_case_ : List[str] = []
if generated_responses is None:
snake_case_ : List[str] = []
snake_case_ : uuid.UUID = conversation_id
snake_case_ : List[str] = past_user_inputs
snake_case_ : List[str] = generated_responses
snake_case_ : Optional[str] = text
def __eq__( self , _lowercase ) -> Any:
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCAmelCase__ ( self , _lowercase , _lowercase = False ) -> int:
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
                    f'New user input added while an unprocessed input still existed: "{self.new_user_input}" was overwritten '
f'with: "{text}".' )
snake_case_ : Union[str, Any] = text
else:
logger.warning(
                    f'New user input added while an unprocessed input still existed: "{self.new_user_input}"; new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite the unprocessed user input.' )
else:
snake_case_ : Union[str, Any] = text
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
snake_case_ : int = None
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
self.generated_responses.append(_lowercase )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
snake_case_ : Tuple = """user""" if is_user else """bot"""
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
SCREAMING_SNAKE_CASE__ , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
super().__init__(*_lowercase , **_lowercase )
if self.tokenizer.pad_token_id is None:
snake_case_ : str = self.tokenizer.eos_token
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , **_lowercase ) -> str:
'''simple docstring'''
snake_case_ : int = {}
snake_case_ : Tuple = {}
snake_case_ : Optional[Any] = {}
if min_length_for_response is not None:
snake_case_ : str = min_length_for_response
if minimum_tokens is not None:
snake_case_ : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
snake_case_ : Tuple = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
snake_case_ : Dict = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_lowercase )
return preprocess_params, forward_params, postprocess_params
def __call__( self , _lowercase , _lowercase=0 , **_lowercase ) -> List[str]:
'''simple docstring'''
snake_case_ : str = super().__call__(_lowercase , num_workers=_lowercase , **_lowercase )
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1:
return outputs[0]
return outputs
def UpperCAmelCase__ ( self , _lowercase , _lowercase=3_2 ) -> Dict[str, Any]:
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
f'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
snake_case_ : Union[str, Any] = self.tokenizer._build_conversation_input_ids(_lowercase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
snake_case_ : List[str] = self._legacy_parse_and_tokenize(_lowercase )
if self.framework == "pt":
snake_case_ : int = torch.LongTensor([input_ids] )
elif self.framework == "tf":
snake_case_ : Optional[int] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCAmelCase__ ( self , _lowercase , _lowercase=1_0 , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = generate_kwargs.get("""max_length""" , self.model.config.max_length )
snake_case_ : List[str] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
snake_case_ : List[str] = max_length - minimum_tokens
snake_case_ : Dict = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
snake_case_ : List[Any] = model_inputs["""attention_mask"""][:, -trim:]
snake_case_ : List[Any] = model_inputs.pop("""conversation""" )
snake_case_ : Optional[Any] = max_length
snake_case_ : Optional[Any] = self.model.generate(**_lowercase , **_lowercase )
if self.model.config.is_encoder_decoder:
snake_case_ : int = 1
else:
snake_case_ : str = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCAmelCase__ ( self , _lowercase , _lowercase=True ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = model_outputs["""output_ids"""]
snake_case_ : List[Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
snake_case_ : List[Any] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(_lowercase )
return conversation
def UpperCAmelCase__ ( self , _lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.tokenizer.eos_token_id
snake_case_ : List[Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
if len(_lowercase ) > self.tokenizer.model_max_length:
snake_case_ : List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 58 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : str = RobertaEmbeddings(_lowercase )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Optional[Any] = config.num_labels
snake_case_ : Dict = config.num_hidden_layers
snake_case_ : str = DeeRobertaModel(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : List[str] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = self.num_layers
try:
snake_case_ : int = self.roberta(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
snake_case_ : str = outputs[1]
snake_case_ : Union[str, Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : List[Any] = e.message
snake_case_ : Union[str, Any] = e.exit_layer
snake_case_ : Dict = outputs[0]
if not self.training:
snake_case_ : Dict = entropy(_lowercase )
snake_case_ : Optional[int] = []
snake_case_ : Union[str, Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Dict = MSELoss()
snake_case_ : Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Union[str, Any] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : int = []
for highway_exit in outputs[-1]:
snake_case_ : Tuple = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Optional[int] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : List[str] = (loss,) + outputs
if not self.training:
snake_case_ : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : Tuple = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 58 | 1 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 0.0
_lowerCamelCase = 1
_lowerCamelCase = 1
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = jnp.floataa
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[int] = []
snake_case_ : Optional[int] = []
for i in range(self.num_layers ):
snake_case_ : List[str] = self.in_channels if i == 0 else self.out_channels
snake_case_ : Optional[int] = FlaxResnetBlockaD(
in_channels=_lowercase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowercase )
snake_case_ : Any = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_lowercase )
snake_case_ : Any = resnets
snake_case_ : Any = attentions
if self.add_downsample:
snake_case_ : Any = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase=True ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = ()
for resnet, attn in zip(self.resnets , self.attentions ):
snake_case_ : List[Any] = resnet(_lowercase , _lowercase , deterministic=_lowercase )
snake_case_ : Optional[int] = attn(_lowercase , _lowercase , deterministic=_lowercase )
output_states += (hidden_states,)
if self.add_downsample:
snake_case_ : List[Any] = self.downsamplers_a(_lowercase )
output_states += (hidden_states,)
return hidden_states, output_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 0.0
_lowerCamelCase = 1
_lowerCamelCase = True
_lowerCamelCase = jnp.floataa
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = []
for i in range(self.num_layers ):
snake_case_ : int = self.in_channels if i == 0 else self.out_channels
snake_case_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=_lowercase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowercase )
snake_case_ : Dict = resnets
if self.add_downsample:
snake_case_ : Any = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _lowercase , _lowercase , _lowercase=True ) -> List[Any]:
'''simple docstring'''
snake_case_ : Dict = ()
for resnet in self.resnets:
snake_case_ : List[Any] = resnet(_lowercase , _lowercase , deterministic=_lowercase )
output_states += (hidden_states,)
if self.add_downsample:
snake_case_ : Union[str, Any] = self.downsamplers_a(_lowercase )
output_states += (hidden_states,)
return hidden_states, output_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 0.0
_lowerCamelCase = 1
_lowerCamelCase = 1
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = jnp.floataa
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
for i in range(self.num_layers ):
snake_case_ : Dict = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case_ : Tuple = self.prev_output_channel if i == 0 else self.out_channels
snake_case_ : str = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowercase )
snake_case_ : Optional[int] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_lowercase )
snake_case_ : Union[str, Any] = resnets
snake_case_ : Any = attentions
if self.add_upsample:
snake_case_ : Tuple = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True ) -> Union[str, Any]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
snake_case_ : Union[str, Any] = res_hidden_states_tuple[-1]
snake_case_ : Optional[Any] = res_hidden_states_tuple[:-1]
snake_case_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case_ : List[str] = resnet(_lowercase , _lowercase , deterministic=_lowercase )
snake_case_ : int = attn(_lowercase , _lowercase , deterministic=_lowercase )
if self.add_upsample:
snake_case_ : List[Any] = self.upsamplers_a(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 0.0
_lowerCamelCase = 1
_lowerCamelCase = True
_lowerCamelCase = jnp.floataa
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = []
for i in range(self.num_layers ):
snake_case_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case_ : List[str] = self.prev_output_channel if i == 0 else self.out_channels
snake_case_ : Optional[int] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowercase )
snake_case_ : Optional[int] = resnets
if self.add_upsample:
snake_case_ : str = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase=True ) -> Optional[int]:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
snake_case_ : Optional[Any] = res_hidden_states_tuple[-1]
snake_case_ : List[str] = res_hidden_states_tuple[:-1]
snake_case_ : str = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case_ : Optional[int] = resnet(_lowercase , _lowercase , deterministic=_lowercase )
if self.add_upsample:
snake_case_ : int = self.upsamplers_a(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 0.0
_lowerCamelCase = 1
_lowerCamelCase = 1
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = jnp.floataa
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
snake_case_ : Tuple = []
for _ in range(self.num_layers ):
snake_case_ : List[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_lowercase )
snake_case_ : List[str] = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowercase )
snake_case_ : Optional[Any] = resnets
snake_case_ : List[Any] = attentions
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase=True ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.resnets[0](_lowercase , _lowercase )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
snake_case_ : Dict = attn(_lowercase , _lowercase , deterministic=_lowercase )
snake_case_ : Optional[int] = resnet(_lowercase , _lowercase , deterministic=_lowercase )
return hidden_states
| 58 |
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Check whether vertex next_ver can be appended to the path at position curr_ind."""
    # 1. Validate that an edge exists between the current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Backtracking helper that tries to extend path into a Hamiltonian cycle."""
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Return a Hamiltonian cycle through graph starting at start_index, or [] if none exists."""
    # initialize path with -1, meaning the vertices have not been visited yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate; if we find an answer return the path, otherwise return an empty list
    return path if util_hamilton_cycle(graph, path, 1) else []
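

# --- usage sketch (not part of the original snippet) ---
# A minimal example, assuming the functions above: hamilton_cycle is run on a small
# 5-vertex adjacency matrix; with this search order it returns [0, 1, 2, 4, 3, 0].
if __name__ == "__main__":
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))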
| 58 | 1 |
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    """Return today's quote from the zenquotes.io API."""
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    """Return a random quote from the zenquotes.io API."""
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 58 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    """Bundles a BLIP image processor, a language-model tokenizer and a Q-Former tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            # expose the Q-Former encodings under their own keys
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the language-model tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the language-model tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
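

# --- usage sketch (not part of the original file) ---
# Illustrative only; the checkpoint id and the image file are placeholders, not
# something this module defines or guarantees.
#
#   from PIL import Image
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   image = Image.open("example.jpg")
#   inputs = processor(images=image, text="What is shown in this image?", return_tensors="pt")
#   # inputs then holds pixel_values, input_ids/attention_mask and the
#   # qformer_input_ids/qformer_attention_mask keys set in __call__ above.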
| 58 | 1 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_1_0_0,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47_8210_4492_1875) < 1E-2
            assert abs(result_mean.item() - 0.2178_7059_6456_5277) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3521_1181_6406) < 1E-2
            assert abs(result_mean.item() - 0.2_2342_9068_9229_9652) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1E-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1E-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77_1492_0043_9453) < 1E-2
            assert abs(result_mean.item() - 0.1_6226_2890_1481_6284) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1_6633_6059_5703) < 1E-2
            assert abs(result_mean.item() - 0.1_6688_3260_0116_7297) < 1E-3
        else:
            assert abs(result_sum.item() - 119.8_4875_4882_8125) < 1E-2
            assert abs(result_mean.item() - 0.1560_5306_6253_6621) < 1E-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46_9573_9746_0938) < 1E-2
            assert abs(result_mean.item() - 0.2_1805_9346_0798_2635) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3536_3769_5312) < 1E-2
            assert abs(result_mean.item() - 0.2_2342_9083_8241_5771) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1E-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1E-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66_9741_3574_2188) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1E-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63_6535_6445_3125) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1E-2
        else:
            assert abs(result_sum.item() - 170.3_1352_2338_8672) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1E-2
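

# --- usage sketch (not part of the original test file) ---
# Minimal standalone loop mirroring the tests above; `unet` and `sample` are
# placeholders for a real denoising model and an initial noise tensor.
#
#   scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
#   scheduler.set_timesteps(10)
#   sample = sample * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t)  # placeholder model call
#       sample = scheduler.step(noise_pred, t, sample).prev_sample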
| 58 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 58 | 1 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Return True if number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes 2, 3, 5, 7, ... in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes strictly below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
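

# --- worked example (not part of the original snippet) ---
# solution(n) sums every prime strictly below n because takewhile stops at the
# first generated prime >= n; for instance solution(10) == 2 + 3 + 5 + 7 == 17.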
| 58 |
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an s3prl downstream checkpoint into a transformers Wav2Vec2 model."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()

    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
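

# --- usage sketch (not part of the original script) ---
# Hypothetical invocation; the script name, every path and the base model name
# below are placeholders:
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model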
| 58 | 1 |