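# --- Snippet: decorator that fires an accelerate CPU-offload hook before non-forward methods ---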
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def apply_forward_hook(method):
    """
    Decorator that applies a registered CpuOffload hook to an arbitrary function rather than `forward`. This is
    useful for cases where a PyTorch module provides functions other than `forward` that should trigger a move to
    the appropriate acceleration device. This decorator looks inside the internal `_hf_hook` property to find a
    registered offload hook.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
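# --- Snippet: conversion script porting DINO ViT checkpoints (torch hub) to HuggingFace ViT ---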
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the fused qkv matrix of each encoder layer into separate queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image from the COCO validation set
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original model's weights to our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )

    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
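# --- Snippet: invisible-watermark encoder applied to Stable Diffusion image batches ---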
import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        # map from [-1, 1] latents range to uint8-style [0, 255] HWC numpy arrays
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        # map back to the [-1, 1] range expected downstream
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
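# --- Snippet: conversion script porting fairseq UniSpeech checkpoints to HuggingFace UniSpeech ---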
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43

            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
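# --- Snippet: package __init__ for the spectrogram diffusion pipeline, guarded by optional dependencies ---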
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
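# --- Snippet: deprecated DPTFeatureExtractor alias that forwards to DPTImageProcessor ---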
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
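# --- Snippet: datasets task template for extractive question answering ---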
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
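# --- Snippet: CI helper that queries the GitHub API for offline self-hosted runners ---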
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
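# --- Snippet: unit and integration tests for DecisionTransformerModel ---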
import inspect
import unittest

from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import DecisionTransformerModel
    from transformers.models.decision_transformer.modeling_decision_transformer import (
        DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )


class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)


@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return
        from a sequence of states, actions and returns. The test is performed over two timesteps.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
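# --- Snippet: re-exports for accelerate's test utilities ---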
from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU

from .scripts import test_script, test_sync, test_ops  # isort: skip
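# --- Snippet: Whisper feature extractor computing log-mel spectrogram input features ---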
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """
        Every array in the list is normalized to have zero mean and unit variance.
        """
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
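# --- Snippet: Russian-peasant (binary) multiplication, plain and modular ---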
def binary_multiply(a: int, b: int) -> int:
    """
    Multiply two integers by scanning the bits of ``b``, doubling ``a`` at
    each step and adding it to the result whenever the current bit is set.

    >>> binary_multiply(2, 3)
    6
    >>> binary_multiply(5, 0)
    0
    """
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """
    Compute (a * b) % modulus with the same bit-scanning scheme, keeping the
    running sum reduced modulo ``modulus``.

    >>> binary_mod_multiply(2, 3, 5)
    1
    >>> binary_mod_multiply(10, 5, 13)
    11
    """
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
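# --- Snippet: O(n log n) longest increasing subsequence via binary search on tails ---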
from __future__ import annotations


def ceil_index(v, l, r, key):  # noqa: E741
    """Binary search: smallest index in v[l..r] whose value is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """
    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> longest_increasing_subsequence_length([])
    0
    """
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value, replaces the current best candidate of length 1
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the smallest tail element that is >= v[i]
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
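# --- Snippet: conversion script porting Donut checkpoints to a HuggingFace VisionEncoderDecoderModel ---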
import argparse

import torch
from datasets import load_dataset
from donut import DonutModel

from transformers import (
    DonutImageProcessor,
    DonutProcessor,
    DonutSwinConfig,
    DonutSwinModel,
    MBartConfig,
    MBartForCausalLM,
    VisionEncoderDecoderModel,
    XLMRobertaTokenizerFast,
)


def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    # verify patch embeddings
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
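# --- Snippet: M2M100 tokenizer built on SentencePiece with fairseq language-code tokens ---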
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
    "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
# fmt: on


class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. Prefix=[src_lang_code] and suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting. Prefix=[tgt_lang_code] and suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
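# --- Snippet: re-exports for transformers' data collators, metrics and processors ---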
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
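# --- Snippet: datasets AudioFolder builder configuration and supported extensions ---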
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
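# --- Snippet: lazy-import __init__ for the trajectory transformer model ---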
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]


if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
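# --- Snippet: temporal transformer block operating over the frame axis of video latents ---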
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , A_ = 16 , A_ = 88 , A_ = None , A_ = None , A_ = 1 , A_ = 0.0 , A_ = 32 , A_ = None , A_ = False , A_ = None , A_ = "geglu" , A_ = True , A_ = True , ) -> List[str]:
"""simple docstring"""
super().__init__()
_lowerCamelCase = num_attention_heads
_lowerCamelCase = attention_head_dim
_lowerCamelCase = num_attention_heads * attention_head_dim
_lowerCamelCase = in_channels
_lowerCamelCase = torch.nn.GroupNorm(num_groups=A_ , num_channels=A_ , eps=1E-6 , affine=A_ )
_lowerCamelCase = nn.Linear(A_ , A_ )
# 3. Define transformers blocks
_lowerCamelCase = nn.ModuleList(
[
BasicTransformerBlock(
A_ , A_ , A_ , dropout=A_ , cross_attention_dim=A_ , activation_fn=A_ , attention_bias=A_ , double_self_attention=A_ , norm_elementwise_affine=A_ , )
for d in range(A_ )
] )
_lowerCamelCase = nn.Linear(A_ , A_ )
def UpperCamelCase_ ( self , A_ , A_=None , A_=None , A_=None , A_=1 , A_=None , A_ = True , ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = hidden_states.shape
_lowerCamelCase = batch_frames // num_frames
_lowerCamelCase = hidden_states
_lowerCamelCase = hidden_states[None, :].reshape(A_ , A_ , A_ , A_ , A_ )
_lowerCamelCase = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
_lowerCamelCase = self.norm(A_ )
_lowerCamelCase = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , A_ , A_ )
_lowerCamelCase = self.proj_in(A_ )
# 2. Blocks
for block in self.transformer_blocks:
_lowerCamelCase = block(
A_ , encoder_hidden_states=A_ , timestep=A_ , cross_attention_kwargs=A_ , class_labels=A_ , )
# 3. Output
_lowerCamelCase = self.proj_out(A_ )
_lowerCamelCase = (
hidden_states[None, None, :]
.reshape(A_ , A_ , A_ , A_ , A_ )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
_lowerCamelCase = hidden_states.reshape(A_ , A_ , A_ , A_ )
_lowerCamelCase = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=A_ )
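# Shape walkthrough for the forward pass above (illustrative numbers, not from the
# original source): with batch_size=2, num_frames=8, in_channels=320 and
# height=width=32, the input (16, 320, 32, 32) is reshaped to (2, 8, 320, 32, 32),
# permuted and flattened to (2*32*32, 8, 320), then proj_in lifts it to the inner
# dim (16 heads * 88 dims = 1408), so the transformer blocks attend across the
# frame axis independently for every spatial location.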
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , A_ = 6_55_36 , A_ = None , A_ = 2 , A_ = 2 , A_ = 0 , A_ = "fourier" , A_ = True , A_ = False , A_ = 0.0 , A_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A_ = "UNetMidBlock1D" , A_ = None , A_ = (32, 32, 64) , A_ = None , A_ = 8 , A_ = 1 , A_ = False , ) -> Dict:
"""simple docstring"""
super().__init__()
_lowerCamelCase = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=A_ , log=A_ , flip_sin_to_cos=A_ )
_lowerCamelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=A_ , downscale_freq_shift=A_ )
_lowerCamelCase = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase = block_out_channels[0] * 4
_lowerCamelCase = TimestepEmbedding(
in_channels=A_ , time_embed_dim=A_ , act_fn=A_ , out_dim=block_out_channels[0] , )
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
# down
_lowerCamelCase = in_channels
for i, down_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_down_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(A_ )
# mid
_lowerCamelCase = get_mid_block(
A_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A_ , add_downsample=A_ , )
# up
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase = out_channels
else:
_lowerCamelCase = block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = (
reversed_block_out_channels[i + 1] if i < len(A_ ) - 1 else final_upsample_channels
)
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_up_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(A_ )
_lowerCamelCase = output_channel
# out
_lowerCamelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_lowerCamelCase = get_out_block(
out_block_type=A_ , num_groups_out=A_ , embed_dim=block_out_channels[0] , out_channels=A_ , act_fn=A_ , fc_dim=block_out_channels[-1] // 4 , )
def UpperCamelCase_ ( self , A_ , A_ , A_ = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
_lowerCamelCase = timestep
if not torch.is_tensor(A_ ):
_lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
_lowerCamelCase = timesteps[None].to(sample.device )
_lowerCamelCase = self.time_proj(A_ )
if self.config.use_timestep_embedding:
_lowerCamelCase = self.time_mlp(A_ )
else:
_lowerCamelCase = timestep_embed[..., None]
_lowerCamelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_lowerCamelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_lowerCamelCase = ()
for downsample_block in self.down_blocks:
_lowerCamelCase , _lowerCamelCase = downsample_block(hidden_states=A_ , temb=A_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCamelCase = self.mid_block(A_ , A_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_lowerCamelCase = down_block_res_samples[-1:]
_lowerCamelCase = down_block_res_samples[:-1]
_lowerCamelCase = upsample_block(A_ , res_hidden_states_tuple=A_ , temb=A_ )
# 5. post-process
if self.out_block:
_lowerCamelCase = self.out_block(A_ , A_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=A_ )
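# Usage sketch (values illustrative): with the defaults above, the model maps a noisy
# 1-D sample of shape (batch, in_channels, sample_size), e.g. (1, 2, 65536), plus a
# scalar timestep to an output of the same shape, the usual contract for 1-D
# diffusion UNets over audio waveforms or RL trajectories.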
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = ['image_processor', 'tokenizer']
A_ = 'LayoutLMv2ImageProcessor'
A_ = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self , A_=None , A_=None , **A_ ) -> Tuple:
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , A_ , )
_lowerCamelCase = kwargs.pop('''feature_extractor''' )
_lowerCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(A_ , A_ )
def __call__( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = True , A_ = False , A_ = None , A_ = None , A_ = 0 , A_ = None , A_ = None , A_ = None , A_ = False , A_ = False , A_ = False , A_ = False , A_ = True , A_ = None , **A_ , ) -> BatchEncoding:
"""simple docstring"""
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
_lowerCamelCase = self.image_processor(images=A_ , return_tensors=A_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(A_ , A_ ):
_lowerCamelCase = [text] # add batch dimension (as the image processor always adds a batch dimension)
_lowerCamelCase = features['''words''']
_lowerCamelCase = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_token_type_ids=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , )
# add pixel values
_lowerCamelCase = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
_lowerCamelCase = self.get_overflowing_images(A_ , encoded_inputs['''overflow_to_sample_mapping'''] )
_lowerCamelCase = images
return encoded_inputs
def UpperCamelCase_ ( self , A_ , A_ ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(A_ ) != len(A_ ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F' {len(A_ )} and {len(A_ )}' )
return images_with_overflow
def UpperCamelCase_ ( self , *A_ , **A_ ) -> Any:
"""simple docstring"""
return self.tokenizer.batch_decode(*A_ , **A_ )
def UpperCamelCase_ ( self , *A_ , **A_ ) -> Any:
"""simple docstring"""
return self.tokenizer.decode(*A_ , **A_ )
@property
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , A_ , )
return self.image_processor_class
@property
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , A_ , )
return self.image_processor
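# Usage sketch (model id illustrative): such a processor is typically obtained via
#   processor = AutoProcessor.from_pretrained("microsoft/layoutxlm-base")
# and encoding = processor(image, return_tensors="pt") then yields input_ids, bbox,
# attention_mask and image, with words and boxes supplied by the OCR step whenever
# apply_ocr is enabled on the image processor.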
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
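    # The two comments above are placeholders; a minimal sketch, assuming skfuzzy's
    # relation/composition helpers (verify the names against the installed version):
    #   R1 = fuzz.relation_min(young, middle_aged)
    #   R2 = fuzz.relation_min(middle_aged, young)
    #   maxmin = fuzz.maxmin_composition(R1, R2)
    #   maxprod = fuzz.maxprod_composition(R1, R2)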
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
snake_case__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def __magic_name__( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(__UpperCAmelCase ):
return ext
raise Exception(
F'Unable to determine file format from file extension {path}. '
F'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
def __magic_name__( __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
_lowerCamelCase = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format
_lowerCamelCase = PipelineDataFormat.from_str(
format=__UpperCAmelCase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(__UpperCAmelCase , __UpperCAmelCase )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = nlp
_lowerCamelCase = reader
@staticmethod
def UpperCamelCase_ ( A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''' )
run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''' )
run_parser.add_argument('''--input''' , type=A_ , help='''Path to the file to use for inference''' )
        run_parser.add_argument('''--output''' , type=A_ , help='''Path to the file to which results will be written.''' )
run_parser.add_argument('''--model''' , type=A_ , help='''Name or path to the model to instantiate.''' )
run_parser.add_argument('''--config''' , type=A_ , help='''Name or path to the model\'s config to instantiate.''' )
run_parser.add_argument(
'''--tokenizer''' , type=A_ , help='''Name of the tokenizer to use. (default: same as the model name)''' )
run_parser.add_argument(
'''--column''' , type=A_ , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
run_parser.add_argument(
'''--format''' , type=A_ , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
run_parser.add_argument(
'''--device''' , type=A_ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''' )
run_parser.set_defaults(func=A_ )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self._nlp, []
for entry in self._reader:
_lowerCamelCase = nlp(**A_ ) if self._reader.is_multi_columns else nlp(A_ )
if isinstance(A_ , A_ ):
outputs.append(A_ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
_lowerCamelCase = self._reader.save_binary(A_ )
logger.warning(F'Current pipeline requires output to be in binary format, saving at {binary_path}' )
else:
self._reader.save(A_ )
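# Example invocation sketch (file names illustrative); the flags map one-to-one onto
# the arguments registered above:
#   transformers-cli run --task text-classification --model distilbert-base-uncased \
#       --input reviews.csv --format csv --column review --output predictions.json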
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger()
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
        _lowerCamelCase = len(list(m.modules() ) ) == 1 or isinstance(A_ , nn.Conv2d ) or isinstance(A_ , nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(A_ )
def __call__( self , A_ ) -> Tuple:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A_ )
[x.remove() for x in self.handles]
return self
@property
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda A_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = 42
A_ = 0
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def __call__( self , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = Tracker(self.dest )(A_ ).parametrized
_lowerCamelCase = Tracker(self.src )(A_ ).parametrized
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.src_skip , A_ ) )
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.dest_skip , A_ ) )
if len(A_ ) != len(A_ ):
raise Exception(
F'Numbers of operations are different. Source module has {len(A_ )} operations while'
F' destination module has {len(A_ )}.' )
for dest_m, src_m in zip(A_ , A_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
            print(F'Transferred from={src_m} to={dest_m}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
print(F'Converting {name}...' )
with torch.no_grad():
_lowerCamelCase = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase ).eval()
_lowerCamelCase = ResNetForImageClassification(__UpperCAmelCase ).eval()
_lowerCamelCase = ModuleTransfer(src=__UpperCAmelCase , dest=__UpperCAmelCase )
_lowerCamelCase = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCAmelCase )
assert torch.allclose(from_model(__UpperCAmelCase ) , our_model(__UpperCAmelCase ).logits ), "The model logits don't match the original one."
_lowerCamelCase = F'resnet{"-".join(name.split("resnet" ) )}'
print(__UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__UpperCAmelCase , )
# we can use the convnext one
_lowerCamelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__UpperCAmelCase , )
print(F'Pushed {checkpoint_name}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = 1000
_lowerCamelCase = (1, num_labels)
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = num_labels
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
_lowerCamelCase = partial(__UpperCAmelCase , num_labels=__UpperCAmelCase , idalabel=__UpperCAmelCase , labelaid=__UpperCAmelCase )
_lowerCamelCase = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(__UpperCAmelCase , names_to_config[model_name] , __UpperCAmelCase , __UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
            ' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
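    # Example run sketch (paths illustrative):
    #   python convert_resnet_to_pytorch.py --model_name resnet50 \
    #       --pytorch_dump_folder_path ./converted
    # Caveat: --push_to_hub is declared with type=bool, so argparse converts any
    # non-empty string (including "False") to True; pass an empty string to disable
    # pushing.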
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
UpperCamelCase__ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
UpperCamelCase__ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = ' Hello world! cécé herlolip'
mnli_rename_keys = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
_lowerCamelCase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase = dct.pop(__UpperCAmelCase )
_lowerCamelCase = val
def __magic_name__( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase = torch.load(__UpperCAmelCase , map_location='''cpu''' )
_lowerCamelCase = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval()
hub_interface.model.load_state_dict(sd['''model'''] )
return hub_interface
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase = emb.weight.shape
_lowerCamelCase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase )
_lowerCamelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
if not os.path.exists(__UpperCAmelCase ):
_lowerCamelCase = torch.hub.load('''pytorch/fairseq''' , __UpperCAmelCase ).eval()
else:
_lowerCamelCase = load_xsum_checkpoint(__UpperCAmelCase )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
_lowerCamelCase = checkpoint_path.replace('''.''' , '''-''' )
_lowerCamelCase = BartConfig.from_pretrained(__UpperCAmelCase )
_lowerCamelCase = bart.encode(__UpperCAmelCase ).unsqueeze(0 )
_lowerCamelCase = BartTokenizer.from_pretrained(__UpperCAmelCase ).encode(__UpperCAmelCase , return_tensors='''pt''' ).unsqueeze(0 )
if not torch.eq(__UpperCAmelCase , __UpperCAmelCase ).all():
raise ValueError(
F'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' )
if checkpoint_path == "bart.large.mnli":
_lowerCamelCase = bart.state_dict()
remove_ignore_keys_(__UpperCAmelCase )
_lowerCamelCase = state_dict['''model.decoder.embed_tokens.weight''']
for src, dest in mnli_rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = BartForSequenceClassification(__UpperCAmelCase ).eval()
model.load_state_dict(__UpperCAmelCase )
_lowerCamelCase = bart.predict('''mnli''' , __UpperCAmelCase , return_logits=__UpperCAmelCase )
_lowerCamelCase = model(__UpperCAmelCase )[0] # logits
else: # no classification heads to worry about
_lowerCamelCase = bart.model.state_dict()
remove_ignore_keys_(__UpperCAmelCase )
_lowerCamelCase = state_dict['''decoder.embed_tokens.weight''']
_lowerCamelCase = bart.extract_features(__UpperCAmelCase )
if hf_checkpoint_name == "facebook/bart-large":
_lowerCamelCase = BartModel(__UpperCAmelCase ).eval()
model.load_state_dict(__UpperCAmelCase )
_lowerCamelCase = model(__UpperCAmelCase ).model[0]
else:
_lowerCamelCase = BartForConditionalGeneration(__UpperCAmelCase ).eval() # an existing summarization ckpt
model.model.load_state_dict(__UpperCAmelCase )
if hasattr(__UpperCAmelCase , '''lm_head''' ):
_lowerCamelCase = make_linear_from_emb(model.model.shared )
_lowerCamelCase = model.model(__UpperCAmelCase )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
F'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
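    # Example run sketch (script and path names illustrative):
    #   python convert_bart_checkpoint.py bart.large.cnn ./bart-large-cnn \
    #       --hf_config facebook/bart-large-cnn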
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_lowerCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
_lowerCamelCase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_lowerCamelCase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(self.tmpdirname , A_ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
# load decoder from hub
_lowerCamelCase = '''hf-internal-testing/ngram-beam-search-decoder'''
def UpperCamelCase_ ( self , **A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(A_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> Optional[Any]:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> int:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(A_ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=A_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = feature_extractor(A_ , return_tensors='''np''' )
_lowerCamelCase = processor(A_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = '''This is a test string'''
_lowerCamelCase = processor(text=A_ )
_lowerCamelCase = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self , A_=(2, 10, 16) , A_=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(A_ )
return np.random.rand(*A_ )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_lowerCamelCase = processor.decode(A_ )
_lowerCamelCase = decoder.decode_beams(A_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_lowerCamelCase = processor.batch_decode(A_ )
else:
with get_context(A_ ).Pool() as pool:
_lowerCamelCase = processor.batch_decode(A_ , A_ )
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as p:
_lowerCamelCase = decoder.decode_beams_batch(A_ , A_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A_ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(A_ , decoded_processor.logit_score )
self.assertListEqual(A_ , decoded_processor.lm_score )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 15
_lowerCamelCase = -20.0
_lowerCamelCase = -4.0
_lowerCamelCase = processor.batch_decode(
A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][2] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , A_ )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , A_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , A_ , atol=1E-3 ) )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 2.0
_lowerCamelCase = 5.0
_lowerCamelCase = -20.0
_lowerCamelCase = True
_lowerCamelCase = processor.batch_decode(
A_ , alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
decoder.reset_params(
alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = os.listdir(A_ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder from the hub and the local files in cache are the same
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = processor_wavaveca(A_ , return_tensors='''np''' )
_lowerCamelCase = processor_auto(A_ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor_wavaveca.batch_decode(A_ )
_lowerCamelCase = processor_auto.batch_decode(A_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def UpperCamelCase_ ( A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = [d[key] for d in offsets]
return retrieved_list
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()[0]
_lowerCamelCase = processor.decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor.batch_decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
import torch
_lowerCamelCase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=A_ )
_lowerCamelCase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_lowerCamelCase = iter(A_ )
_lowerCamelCase = next(A_ )
_lowerCamelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_lowerCamelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_lowerCamelCase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_lowerCamelCase = model(A_ ).logits.cpu().numpy()
_lowerCamelCase = processor.decode(logits[0] , output_word_offsets=A_ )
_lowerCamelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_lowerCamelCase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_lowerCamelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , A_ )
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , output.text )
# output times
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''start_time''' ) )
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''end_time''' ) )
# fmt: off
_lowerCamelCase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_lowerCamelCase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def __magic_name__( __UpperCAmelCase ) -> Any:
'''simple docstring'''
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
@pytest.fixture
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = metric_id
class UpperCamelCase :
'''simple docstring'''
    A_ = [MetricMock(metric_id) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
'''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if "tmp_path" in args:
_lowerCamelCase = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
with pytest.warns(__UpperCAmelCase , match='''https://huggingface.co/docs/evaluate''' ):
func(*__UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> bool:
'''simple docstring'''
_lowerCamelCase = len(__UpperCAmelCase )
_lowerCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
_lowerCamelCase = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
_lowerCamelCase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
_lowerCamelCase = subset[i - 1][j]
if arr[i - 1] <= j:
_lowerCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
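# Worked example of the DP above: subset[i][j] is True when some subset of the first
# i array elements sums to j, so {4, 5} within [3, 34, 4, 12, 5, 2] reaches 9, while
# no subset reaches 30 (26 is the largest sum without 34, and 34 alone overshoots).
assert __magic_name__([3, 34, 4, 12, 5, 2], 9)
assert not __magic_name__([3, 34, 4, 12, 5, 2], 30)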
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import List
import numpy as np
def __magic_name__( __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = {key: len(__UpperCAmelCase ) for key, value in gen_kwargs.items() if isinstance(__UpperCAmelCase , __UpperCAmelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
                + '''we found several data source lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
_lowerCamelCase = max(lists_lengths.values() , default=0 )
return max(1 , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[range]:
'''simple docstring'''
_lowerCamelCase = []
for group_idx in range(__UpperCAmelCase ):
_lowerCamelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_lowerCamelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_lowerCamelCase = range(__UpperCAmelCase , start + num_shards_to_add )
shards_indices_per_group.append(__UpperCAmelCase )
return shards_indices_per_group
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[dict]:
'''simple docstring'''
_lowerCamelCase = _number_of_shards_in_gen_kwargs(__UpperCAmelCase )
if num_shards == 1:
return [dict(__UpperCAmelCase )]
else:
_lowerCamelCase = _distribute_shards(num_shards=__UpperCAmelCase , max_num_jobs=__UpperCAmelCase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(__UpperCAmelCase ) )
]
def __magic_name__( __UpperCAmelCase ) -> dict:
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , __UpperCAmelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> dict:
'''simple docstring'''
_lowerCamelCase = {len(__UpperCAmelCase ) for value in gen_kwargs.values() if isinstance(__UpperCAmelCase , __UpperCAmelCase )}
_lowerCamelCase = {}
for size in list_sizes:
_lowerCamelCase = list(range(__UpperCAmelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_lowerCamelCase = dict(__UpperCAmelCase )
for key, value in shuffled_kwargs.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_lowerCamelCase = [value[i] for i in indices_per_size[len(__UpperCAmelCase )]]
return shuffled_kwargs
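# Worked example of the shard-splitting logic above: distributing 10 shards over
# max_num_jobs=3 yields the contiguous ranges [range(0, 4), range(4, 7),
# range(7, 10)]; the first 10 % 3 = 1 group gets one extra shard (10 // 3 + 1 = 4)
# and the remaining groups get 10 // 3 = 3 each.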
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case__ = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = ReformerTokenizer
A_ = ReformerTokenizerFast
A_ = True
A_ = False
A_ = True
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
_lowerCamelCase = ReformerTokenizer(A_ , keep_accents=A_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = '''<s>'''
_lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(A_ ) , 10_00 )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_rust_tokenizer()
_lowerCamelCase = '''I was born in 92000, and this is falsé.'''
_lowerCamelCase = tokenizer.tokenize(A_ )
_lowerCamelCase = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
_lowerCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ )
_lowerCamelCase = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
_lowerCamelCase = self.get_rust_tokenizer()
_lowerCamelCase = tokenizer.encode(A_ )
_lowerCamelCase = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self , A_=15 ) -> Any:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
# Simple input
_lowerCamelCase = '''This is a simple input'''
_lowerCamelCase = ['''This is a simple input 1''', '''This is a simple input 2''']
_lowerCamelCase = ('''This is a simple input''', '''This is a pair''')
_lowerCamelCase = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='''max_length''' )
# Simple input
self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='''max_length''' )
# Simple input
self.assertRaises(
A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='''max_length''' , )
# Pair input
self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='''max_length''' )
# Pair input
self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='''max_length''' )
# Pair input
self.assertRaises(
A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='''max_length''' , )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = ReformerTokenizer(A_ , keep_accents=A_ )
_lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(A_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A_ ) , [2_85, 46, 10, 1_70, 3_82] , )
_lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_lowerCamelCase = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(
A_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = '''Hello World!'''
_lowerCamelCase = [1_26, 32, 2_62, 1_52, 38, 72, 2_87]
self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )
@slow
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
_lowerCamelCase = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )
@require_torch
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_lowerCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
_lowerCamelCase = ''' '''.join(A_ )
_lowerCamelCase = self.big_tokenizer.encode_plus(A_ , return_tensors='''pt''' )
_lowerCamelCase = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
_lowerCamelCase = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_lowerCamelCase = encoded_sequence['''input_ids'''].shape
_lowerCamelCase = ReformerModel(A_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**A_ )
model(**A_ )
@slow
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
        # fmt: off
        _lowerCamelCase = {'''input_ids''': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_lowerCamelCase = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=A_ , sequences=A_ , )
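# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test class above): the slow/fast
# consistency check the tests perform, written with descriptive names.
# Requires network access plus the sentencepiece and tokenizers packages.
# ---------------------------------------------------------------------------
from transformers import ReformerTokenizer, ReformerTokenizerFast

slow_tokenizer = ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment')
fast_tokenizer = ReformerTokenizerFast.from_pretrained('google/reformer-crime-and-punishment')

sample = 'I was born in 92000, and this is falsé.'
# Both backends should agree on the token strings and on the encoded ids.
assert slow_tokenizer.tokenize(sample) == fast_tokenizer.tokenize(sample)
assert slow_tokenizer.encode(sample) == fast_tokenizer.encode(sample)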
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , A_ , A_=7 , A_=3 , A_=30 , A_=4_00 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=True , A_=1 / 2_55 , A_=True , ) -> List[Any]:
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowerCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = min_resolution
_lowerCamelCase = max_resolution
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = do_normalize
_lowerCamelCase = image_mean
_lowerCamelCase = image_std
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_pad
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self , A_ , A_=False ) -> List[str]:
"""simple docstring"""
if not batched:
_lowerCamelCase = image_inputs[0]
if isinstance(A_ , Image.Image ):
_lowerCamelCase , _lowerCamelCase = image.size
else:
_lowerCamelCase , _lowerCamelCase = image.shape[1], image.shape[2]
if w < h:
_lowerCamelCase = int(self.size['''shortest_edge'''] * h / w )
_lowerCamelCase = self.size['''shortest_edge''']
elif w > h:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = int(self.size['''shortest_edge'''] * w / h )
else:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = self.size['''shortest_edge''']
else:
_lowerCamelCase = []
for image in image_inputs:
_lowerCamelCase , _lowerCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            _lowerCamelCase = max(A_ , key=lambda item : item[0] )[0]
            _lowerCamelCase = max(A_ , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = YolosImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , '''image_mean''' ) )
self.assertTrue(hasattr(A_ , '''image_std''' ) )
self.assertTrue(hasattr(A_ , '''do_normalize''' ) )
self.assertTrue(hasattr(A_ , '''do_resize''' ) )
self.assertTrue(hasattr(A_ , '''size''' ) )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , A_ )
_lowerCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , A_ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# Initialize image_processings
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
_lowerCamelCase = self.image_processing_class(do_resize=A_ , do_normalize=A_ , do_rescale=A_ )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
_lowerCamelCase = image_processing_a.pad(A_ , return_tensors='''pt''' )
_lowerCamelCase = image_processing_a(A_ , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
# prepare image and target
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_lowerCamelCase = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
# prepare image, target and masks_path
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_lowerCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_lowerCamelCase = YolosImageProcessor(format='''coco_panoptic''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify masks
_lowerCamelCase = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A_ )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
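# ---------------------------------------------------------------------------
# Illustrative sketch of the shortest-edge resize rule that
# get_expected_values() mirrors above (standalone, no transformers needed):
# scale the image so its shorter side equals `shortest_edge`, preserving the
# aspect ratio. The sample sizes below are arbitrary.
# ---------------------------------------------------------------------------
def expected_resize(width: int, height: int, shortest_edge: int = 18) -> tuple:
    if width < height:
        return shortest_edge, int(shortest_edge * height / width)
    if width > height:
        return int(shortest_edge * width / height), shortest_edge
    return shortest_edge, shortest_edge

assert expected_resize(30, 400) == (18, 240)  # (width, height) after resizing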
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
snake_case__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_lowerCamelCase = '''lm_head'''
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
_lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = fairseq_model.state_dict()
_lowerCamelCase = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(__UpperCAmelCase )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , __UpperCAmelCase )
if "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase = '''weight'''
else:
_lowerCamelCase = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(F'Unused weights: {unused_weights}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase = UniSpeechConfig.from_pretrained(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechConfig()
if is_finetuned:
if dict_path:
_lowerCamelCase = Dictionary.load_from_json(__UpperCAmelCase )
            # IMPORTANT: change the pad & bos token ids, since the CTC symbol
            # is <pad> and not <s> as in fairseq
_lowerCamelCase = target_dict.pad_index
_lowerCamelCase = target_dict.bos_index
_lowerCamelCase = target_dict.eos_index
_lowerCamelCase = len(target_dict.symbols )
_lowerCamelCase = os.path.join(__UpperCAmelCase , '''vocab.json''' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
_lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase = 42
_lowerCamelCase = 43
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = WavaVecaPhonemeCTCTokenizer(
__UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCAmelCase , )
_lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
_lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
_lowerCamelCase = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
_lowerCamelCase = UniSpeechForCTC(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechForPreTraining(__UpperCAmelCase )
if is_finetuned:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowerCamelCase = model[0].eval()
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
hf_unispeech.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
snake_case__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
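# ---------------------------------------------------------------------------
# Illustrative invocation sketch (the script file name and all paths are
# placeholders, not real files):
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-converted
#
# Without --not_finetuned the checkpoint is treated as fine-tuned: the script
# builds a UniSpeechForCTC model and saves a WavaVecaProcessor next to it;
# with --not_finetuned it builds a UniSpeechForPreTraining model instead.
# ---------------------------------------------------------------------------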
import argparse
import json
from tqdm import tqdm
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=__UpperCAmelCase , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=__UpperCAmelCase , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=__UpperCAmelCase , help='''where to store parsed gold_data_path file''' , )
_lowerCamelCase = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
_lowerCamelCase = json.load(__UpperCAmelCase )
for dpr_record in tqdm(__UpperCAmelCase ):
_lowerCamelCase = dpr_record['''question''']
_lowerCamelCase = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(__UpperCAmelCase ) + '''\n''' )
if __name__ == "__main__":
main()
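# ---------------------------------------------------------------------------
# Illustrative sketch of the record shape this script consumes (a made-up
# minimal example, not taken from the real biencoder-nq-dev.json):
#
#   [{"question": "who wrote crime and punishment",
#     "positive_ctxs": [{"title": "Crime and Punishment", "text": "..."}]}]
#
# For that record the script writes the question to the evaluation-set file
# and the tab-joined positive-context titles to the gold-data file.
# ---------------------------------------------------------------------------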
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def __magic_name__( __UpperCAmelCase=32 , __UpperCAmelCase=10 , __UpperCAmelCase=100 , __UpperCAmelCase=1026 , __UpperCAmelCase=True , __UpperCAmelCase="data/tokenized_stories_train_wikitext103.jbl" , __UpperCAmelCase="igf_context_pairs.jbl" , ) -> Dict:
'''simple docstring'''
set_seed(3 )
# generate train_data and objective_set
_lowerCamelCase , _lowerCamelCase = generate_datasets(
__UpperCAmelCase , __UpperCAmelCase , number=__UpperCAmelCase , min_len=1026 , trim=__UpperCAmelCase )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
_lowerCamelCase = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
_lowerCamelCase = load_gpta('''gpt2''' ).to(__UpperCAmelCase )
print('''computing perplexity on objective set''' )
_lowerCamelCase = compute_perplexity(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).item()
print('''perplexity on objective set:''' , __UpperCAmelCase )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=15 , __UpperCAmelCase=128 , __UpperCAmelCase=100 , __UpperCAmelCase="igf_model.pt" , ) -> Optional[Any]:
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
_lowerCamelCase = GPTaLMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
_lowerCamelCase = SecondaryLearner(__UpperCAmelCase )
# Train secondary learner
_lowerCamelCase = train_secondary_learner(
__UpperCAmelCase , __UpperCAmelCase , max_epochs=__UpperCAmelCase , batch_size=__UpperCAmelCase , eval_freq=100 , igf_model_path=__UpperCAmelCase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=32 , __UpperCAmelCase=1000 , __UpperCAmelCase=16 , __UpperCAmelCase=1.0 , __UpperCAmelCase=recopy_gpta , __UpperCAmelCase=None , __UpperCAmelCase=10 , __UpperCAmelCase="gpt2_finetuned.pt" , ) -> str:
'''simple docstring'''
_lowerCamelCase = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
_lowerCamelCase = RandomSampler(__UpperCAmelCase )
_lowerCamelCase = DataLoader(__UpperCAmelCase , sampler=__UpperCAmelCase )
_lowerCamelCase = max_steps // (len(__UpperCAmelCase )) + 1
_lowerCamelCase = 0
_lowerCamelCase = torch.zeros((1, context_len) , dtype=torch.long , device=__UpperCAmelCase )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = recopy_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
model.train()
if secondary_learner is not None:
secondary_learner.to(__UpperCAmelCase )
secondary_learner.eval()
_lowerCamelCase = []
_lowerCamelCase = 0
_lowerCamelCase = []
_lowerCamelCase = []
# Compute the performance of the transformer model at the beginning
_lowerCamelCase = compute_perplexity(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
test_perps.append(__UpperCAmelCase )
print('''Test perplexity, step''' , __UpperCAmelCase , ''':''' , __UpperCAmelCase )
for epoch in range(int(__UpperCAmelCase ) ):
for step, example in enumerate(__UpperCAmelCase ):
torch.cuda.empty_cache()
_lowerCamelCase = random.randint(0 , example.size(2 ) - context_len - 1 )
_lowerCamelCase = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
_lowerCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
_lowerCamelCase = True
if secondary_learner is not None:
_lowerCamelCase = secondary_learner.forward(
torch.tensor(__UpperCAmelCase , dtype=torch.long , device=__UpperCAmelCase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__UpperCAmelCase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
_lowerCamelCase = -1
if predicted_q < threshold:
_lowerCamelCase = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
_lowerCamelCase = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
_lowerCamelCase = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
_lowerCamelCase = compute_perplexity(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
test_perps.append(__UpperCAmelCase )
print('''Test perplexity, step''' , __UpperCAmelCase , ''':''' , __UpperCAmelCase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __UpperCAmelCase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def __magic_name__( ) -> Tuple:
'''simple docstring'''
_lowerCamelCase = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=__UpperCAmelCase , default=__UpperCAmelCase , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=__UpperCAmelCase , default=__UpperCAmelCase , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=__UpperCAmelCase , default=__UpperCAmelCase , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=__UpperCAmelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=100 , type=__UpperCAmelCase , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=100 , type=__UpperCAmelCase , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=1000 , type=__UpperCAmelCase , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=128 , type=__UpperCAmelCase , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=__UpperCAmelCase , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=__UpperCAmelCase , help=(
            '''decay the selectivity of our secondary learner filter from '''
            '''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=100 , type=__UpperCAmelCase , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=1026 , type=__UpperCAmelCase , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=__UpperCAmelCase , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=__UpperCAmelCase , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=__UpperCAmelCase , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__UpperCAmelCase , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
_lowerCamelCase = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
_lowerCamelCase = training_secondary_learner(
__UpperCAmelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
_lowerCamelCase = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
_lowerCamelCase , _lowerCamelCase = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1026 , trim=__UpperCAmelCase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__UpperCAmelCase , secondary_learner=__UpperCAmelCase , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
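# ---------------------------------------------------------------------------
# Toy standalone sketch of the filtering idea in finetune() above (not the
# author's training loop): only contexts whose predicted information gain
# clears a threshold contribute to a gradient batch. The predictor below is
# a random stand-in for the SecondaryLearner.
# ---------------------------------------------------------------------------
import random

def filtered_batches(contexts, predict_ig, threshold=1.0, batch_size=4):
    batch = []
    for context in contexts:
        if predict_ig(context) >= threshold:  # mirrors `predicted_q < threshold -> skip`
            batch.append(context)
            if len(batch) == batch_size:
                yield batch  # backprop on the accumulated batch would go here
                batch = []

random.seed(0)
for toy_batch in filtered_batches(range(40), lambda c: random.gauss(1.0, 0.5)):
    print(toy_batch)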
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ = "cpu" , A_ = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
_lowerCamelCase = device
_lowerCamelCase = CLIPTokenizerFast.from_pretrained(A_ )
_lowerCamelCase = [0.48145466, 0.4578275, 0.40821073]
_lowerCamelCase = [0.26862954, 0.26130258, 0.27577711]
_lowerCamelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_lowerCamelCase = torchvision.transforms.Resize(2_24 )
_lowerCamelCase = torchvision.transforms.CenterCrop(2_24 )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.resize(A_ )
_lowerCamelCase = self.center_crop(A_ )
_lowerCamelCase = self.normalize(A_ )
return images
def __call__( self , A_=None , A_=None , **A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.tokenizer(text=A_ , **A_ )
_lowerCamelCase = self.preprocess_img(A_ )
_lowerCamelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , A_=10 , A_=0.01 , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=False , A_=True , A_="image" , A_=True , A_=False , A_=False , A_=False , ) -> None:
"""simple docstring"""
super().__init__()
_lowerCamelCase = None
_lowerCamelCase = device if device else get_device()
if vqgan:
_lowerCamelCase = vqgan
else:
_lowerCamelCase = load_vqgan(self.device , conf_path=A_ , ckpt_path=A_ )
self.vqgan.eval()
if clip:
_lowerCamelCase = clip
else:
_lowerCamelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
_lowerCamelCase = ProcessorGradientFlow(device=self.device )
_lowerCamelCase = iterations
_lowerCamelCase = lr
_lowerCamelCase = log
_lowerCamelCase = make_grid
_lowerCamelCase = return_val
_lowerCamelCase = quantize
_lowerCamelCase = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self , A_=None , A_=None , A_=5 , A_=True ) -> Any:
"""simple docstring"""
_lowerCamelCase = []
if output_path is None:
_lowerCamelCase = '''./animation.gif'''
if input_path is None:
_lowerCamelCase = self.save_path
_lowerCamelCase = sorted(glob(input_path + '''/*''' ) )
if not len(A_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(A_ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
_lowerCamelCase = total_duration / len(A_ )
_lowerCamelCase = [frame_duration] * len(A_ )
if extend_frames:
_lowerCamelCase = 1.5
_lowerCamelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(A_ ) )
imageio.mimsave(A_ , A_ , duration=A_ )
print(F'gif saved to {output_path}' )
def UpperCamelCase_ ( self , A_=None , A_=None ) -> Union[str, Any]:
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
_lowerCamelCase = preprocess(Image.open(A_ ) , target_image_size=2_56 ).to(self.device )
_lowerCamelCase = preprocess_vqgan(A_ )
_lowerCamelCase , *_lowerCamelCase = self.vqgan.encode(A_ )
return z
def UpperCamelCase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.latent.detach().requires_grad_()
_lowerCamelCase = base_latent + transform_vector
if self.quantize:
_lowerCamelCase , *_lowerCamelCase = self.vqgan.quantize(A_ )
else:
_lowerCamelCase = trans_latent
return self.vqgan.decode(A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_=None ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.clip_preprocessor(text=A_ , images=A_ , return_tensors='''pt''' , padding=A_ )
_lowerCamelCase = self.clip(**A_ )
_lowerCamelCase = clip_outputs.logits_per_image
if weights is not None:
_lowerCamelCase = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self._get_clip_similarity(pos_prompts['''prompts'''] , A_ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
_lowerCamelCase = self._get_clip_similarity(neg_prompts['''prompts'''] , A_ , weights=neg_prompts['''weights'''] )
else:
_lowerCamelCase = torch.tensor([1] , device=self.device )
_lowerCamelCase = -torch.log(A_ ) + torch.log(A_ )
return loss
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = torch.randn_like(self.latent , requires_grad=A_ , device=self.device )
_lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_lowerCamelCase = self._add_vector(A_ )
_lowerCamelCase = loop_post_process(A_ )
_lowerCamelCase = self._get_CLIP_loss(A_ , A_ , A_ )
print('''CLIP loss''' , A_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=A_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
wandb.init(reinit=A_ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
_lowerCamelCase = Image.open(A_ )
_lowerCamelCase = image.resize((2_56, 2_56) )
            wandb.log({'''Original Image''': wandb.Image(A_ )} )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
if not prompts:
return []
_lowerCamelCase = []
_lowerCamelCase = []
if isinstance(A_ , A_ ):
_lowerCamelCase = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(A_ , (tuple, list) ):
_lowerCamelCase = prompt[0]
_lowerCamelCase = float(prompt[1] )
elif ":" in prompt:
_lowerCamelCase , _lowerCamelCase = prompt.split(''':''' )
_lowerCamelCase = float(A_ )
else:
_lowerCamelCase = prompt
_lowerCamelCase = 1.0
processed_prompts.append(A_ )
weights.append(A_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A_ , device=self.device ),
}
def UpperCamelCase_ ( self , A_ , A_=None , A_=None , A_=True , A_=False , A_=True , A_=True , A_=None , ) -> str:
"""simple docstring"""
if image_path:
_lowerCamelCase = self._get_latent(A_ )
else:
_lowerCamelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(A_ , A_ , A_ )
assert pos_prompts, "You must provide at least one positive prompt."
_lowerCamelCase = self.process_prompts(A_ )
_lowerCamelCase = self.process_prompts(A_ )
if save_final and save_path is None:
_lowerCamelCase = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(A_ ):
os.makedirs(A_ )
else:
_lowerCamelCase = save_path + '''_''' + get_timestamp()
os.makedirs(A_ )
_lowerCamelCase = save_path
_lowerCamelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(A_ ) )
_lowerCamelCase = loop_post_process(A_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(A_ , A_ , A_ ) ):
if show_intermediate:
show_pil(A_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(A_ )} )
if show_final:
show_pil(A_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
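# ---------------------------------------------------------------------------
# Standalone rewrite of the prompt-parsing rules in process_prompts() above
# (for illustration only): a prompt may be a (text, weight) pair, a
# 'text:weight' string, or a bare string defaulting to weight 1.0; a single
# '|'-separated string is first split into individual prompts.
# ---------------------------------------------------------------------------
def parse_prompt(prompt):
    if isinstance(prompt, (tuple, list)):
        return str(prompt[0]), float(prompt[1])
    if ':' in prompt:
        text, weight = prompt.split(':')
        return text, float(weight)
    return prompt, 1.0

assert parse_prompt('a smiling face:2.0') == ('a smiling face', 2.0)
assert parse_prompt(('a smiling face', 2)) == ('a smiling face', 2.0)
assert parse_prompt('a smiling face') == ('a smiling face', 1.0)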
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 'audio-spectrogram-transformer'
def __init__( self , A_=7_68 , A_=12 , A_=12 , A_=30_72 , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.02 , A_=1E-1_2 , A_=16 , A_=True , A_=10 , A_=10 , A_=10_24 , A_=1_28 , **A_ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**A_ )
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = initializer_range
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = patch_size
_lowerCamelCase = qkv_bias
_lowerCamelCase = frequency_stride
_lowerCamelCase = time_stride
_lowerCamelCase = max_length
_lowerCamelCase = num_mel_bins
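# ---------------------------------------------------------------------------
# Illustrative usage sketch. The class above is a renamed copy of the Audio
# Spectrogram Transformer configuration; with the real library the
# equivalent would be:
#
#   from transformers import ASTConfig
#   config = ASTConfig(num_mel_bins=128, max_length=1024)
#   print(config.hidden_size)  # 768 by default
# ---------------------------------------------------------------------------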
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
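# ---------------------------------------------------------------------------
# Illustrative sketch of what the _LazyModule indirection above provides:
# importing the package stays cheap, and heavy submodules load on first
# attribute access. Downloading the checkpoint below requires network access.
#
#   import transformers
#   tokenizer = transformers.WhisperTokenizer.from_pretrained('openai/whisper-tiny')
#
# The attribute lookup triggers the real import of tokenization_whisper, while
# is_torch_available() / is_tf_available() / is_flax_available() gate the
# framework-specific model classes.
# ---------------------------------------------------------------------------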
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
snake_case__ = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def __magic_name__( __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.mean(1 )
# Centralize the data of class i
_lowerCamelCase = data - column_reshape(__UpperCAmelCase )
if i > 0:
            # If covariance_sum has already been initialized (i.e. not the first class)
covariance_sum += np.dot(__UpperCAmelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = features.mean(1 )
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.shape[1]
_lowerCamelCase = data.mean(1 )
if i > 0:
            # If covariance_sum has already been initialized (i.e. not the first class)
covariance_sum += device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
if features.any():
_lowerCamelCase = features.mean(1 )
# Center the dataset
_lowerCamelCase = features - np.reshape(__UpperCAmelCase , (data_mean.size, 1) )
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T ) / features.shape[1]
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(__UpperCAmelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
_lowerCamelCase = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCamelCase = np.dot(filtered_eigenvectors.T , __UpperCAmelCase )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
_lowerCamelCase , _lowerCamelCase = eigh(
covariance_between_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , covariance_within_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , )
_lowerCamelCase = eigenvectors[:, ::-1][:, :dimensions]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = np.linalg.svd(__UpperCAmelCase )
_lowerCamelCase = svd_matrix[:, 0:dimensions]
_lowerCamelCase = np.dot(filtered_svd_matrix.T , __UpperCAmelCase )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCamelCase = np.array([0, 0, 0, 1, 1] )
_lowerCamelCase = 2
_lowerCamelCase = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = linear_discriminant_analysis(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if isinstance(__UpperCAmelCase , np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCamelCase = 2
_lowerCamelCase = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = principal_component_analysis(__UpperCAmelCase , __UpperCAmelCase )
if not np.allclose(__UpperCAmelCase , __UpperCAmelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
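# ---------------------------------------------------------------------------
# Minimal worked example for principal_component_analysis() above (calling it
# by its descriptive name, as the test functions above do). Columns are
# samples, so three perfectly correlated 3-D points collapse onto a single
# principal component.
# ---------------------------------------------------------------------------
demo_features = np.array([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [3.0, 6.0, 9.0]])
projected = principal_component_analysis(demo_features, 1)
print(projected.shape)  # (1, 3): one principal component, three samples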
import re
from filelock import FileLock
try:
import nltk
snake_case__ = True
except (ImportError, ModuleNotFoundError):
snake_case__ = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
    _lowerCamelCase = re.sub('''<n>''' , '''''' , __UpperCAmelCase )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_lowerCamelCase ) )
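# ---------------------------------------------------------------------------
# Illustrative usage sketch for the sentence splitter above (requires nltk and
# the 'punkt' data downloaded by the FileLock block): '<n>' markers are
# stripped and nltk's sentence boundaries become real newlines.
# ---------------------------------------------------------------------------
print(__magic_name__('First sentence.<n> Second sentence follows it.'))
# First sentence.
# Second sentence follows it.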
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = ['vqvae']
def __init__( self , A_ , A_ , A_ , A_ , ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ , mel=A_ , vqvae=A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , A_ ) else 10_00
@torch.no_grad()
def __call__( self , A_ = 1 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = 0 , A_ = None , A_ = None , A_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
_lowerCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=A_ , device=self.device , )
_lowerCamelCase = noise
_lowerCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(A_ , A_ )
_lowerCamelCase = self.mel.audio_slice_to_image(A_ )
_lowerCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
_lowerCamelCase = (input_image / 2_55) * 2 - 1
_lowerCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCamelCase = self.vqvae.encode(torch.unsqueeze(A_ , 0 ) ).latent_dist.sample(
generator=A_ )[0]
_lowerCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , self.scheduler.timesteps[start_step - 1] )
_lowerCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCamelCase = int(mask_start_secs * pixels_per_second )
_lowerCamelCase = int(mask_end_secs * pixels_per_second )
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , A_ ):
_lowerCamelCase = self.unet(A_ , A_ , A_ )['''sample''']
else:
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
if isinstance(self.scheduler , A_ ):
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , eta=A_ , generator=A_ , )['''prev_sample''']
else:
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , generator=A_ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_lowerCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCamelCase = 1 / self.vqvae.config.scaling_factor * images
_lowerCamelCase = self.vqvae.decode(A_ )['''sample''']
_lowerCamelCase = (images / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_lowerCamelCase = (images * 2_55).round().astype('''uint8''' )
_lowerCamelCase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
_lowerCamelCase = [self.mel.image_to_audio(A_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(A_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(A_ ) )
@torch.no_grad()
def UpperCamelCase_ ( self , A_ , A_ = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , A_ )
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCamelCase = (sample / 2_55) * 2 - 1
_lowerCamelCase = torch.Tensor(A_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_lowerCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCamelCase = self.scheduler.alphas_cumprod[t]
_lowerCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCamelCase = 1 - alpha_prod_t
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
_lowerCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def UpperCamelCase_ ( xa , xb , alpha ) -> torch.Tensor:
        """spherical linear interpolation (slerp) between two flattened tensors"""
        _lowerCamelCase = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
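# --- Hedged sketch (added): the spherical interpolation above, restated in
# NumPy with explicit names so the two endpoints are easy to follow.
import numpy as np

def slerp(x0: np.ndarray, x1: np.ndarray, alpha: float) -> np.ndarray:
    theta = np.arccos(np.dot(x0, x1) / (np.linalg.norm(x0) * np.linalg.norm(x1)))
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)

print(slerp(np.array([1.0, 0.0]), np.array([0.0, 1.0]), 0.5))  # [0.7071... 0.7071...]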
| 638
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )
_lowerCamelCase = tf.convert_to_tensor(
[[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
_lowerCamelCase = model(A_ )['''last_hidden_state''']
_lowerCamelCase = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
_lowerCamelCase = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 717
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__lowercase ) , 'Tatoeba directory does not exist.' )
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A_ )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=A_ )
assert mmeta["long_pair"] == "heb-eng"
| 638
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=False ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase = ''''''
else:
_lowerCamelCase = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_lowerCamelCase = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase = in_proj_bias[: config.hidden_size]
_lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase = in_proj_bias[-config.hidden_size :]
def __magic_name__( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = dct.pop(__UpperCAmelCase )
_lowerCamelCase = val
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True ) -> str:
'''simple docstring'''
_lowerCamelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
_lowerCamelCase = 8
# set labels if required
if not base_model:
_lowerCamelCase = 1000
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_lowerCamelCase = 384
_lowerCamelCase = 1536
_lowerCamelCase = 12
_lowerCamelCase = 6
# load original model from torch hub
_lowerCamelCase = torch.hub.load('''facebookresearch/dino:main''' , __UpperCAmelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase = original_model.state_dict()
if base_model:
remove_classification_head_(__UpperCAmelCase )
_lowerCamelCase = create_rename_keys(__UpperCAmelCase , base_model=__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# load HuggingFace model
if base_model:
_lowerCamelCase = ViTModel(__UpperCAmelCase , add_pooling_layer=__UpperCAmelCase ).eval()
else:
_lowerCamelCase = ViTForImageClassification(__UpperCAmelCase ).eval()
model.load_state_dict(__UpperCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor
_lowerCamelCase = ViTImageProcessor()
_lowerCamelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
_lowerCamelCase = encoding['''pixel_values''']
_lowerCamelCase = model(__UpperCAmelCase )
if base_model:
_lowerCamelCase = original_model(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
_lowerCamelCase = original_model(__UpperCAmelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCAmelCase , outputs.logits , atol=1E-3 )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
snake_case__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
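# --- Hedged sketch (added; not part of the conversion script) ---
# The fused-QKV split performed by the q/k/v reader above: timm stores a single
# (3 * hidden, hidden) projection, while HF ViT expects separate Q, K, V matrices.
import torch

hidden_size = 8
qkv_weight = torch.randn(3 * hidden_size, hidden_size)
q = qkv_weight[:hidden_size, :]
k = qkv_weight[hidden_size : 2 * hidden_size, :]
v = qkv_weight[-hidden_size:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)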
| 718
|
| 638
| 0
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case__ = logging.get_logger(__name__)
snake_case__ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
snake_case__ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
snake_case__ = {
'abeja/gpt-neox-japanese-2.7b': 2048,
}
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
with open(__UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = collections.OrderedDict()
_lowerCamelCase = collections.OrderedDict()
_lowerCamelCase = collections.OrderedDict()
with open(__UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
_lowerCamelCase = f.readlines()
_lowerCamelCase = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
for idx, b in enumerate(__UpperCAmelCase ):
_lowerCamelCase = b
_lowerCamelCase = idx
for wd in b:
_lowerCamelCase = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['input_ids', 'attention_mask']
def __init__( self , A_ , A_ , A_="<|endoftext|>" , A_="<|endoftext|>" , A_="<|startoftext|>" , A_="<|endoftext|>" , A_=False , **A_ , ) -> Dict:
"""simple docstring"""
super().__init__(
unk_token=A_ , pad_token=A_ , bos_token=A_ , eos_token=A_ , do_clean_text=A_ , **A_ , )
if not os.path.isfile(A_ ):
raise ValueError(
F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ''' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
if not os.path.isfile(A_ ):
raise ValueError(
F'Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ''' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
_lowerCamelCase = do_clean_text
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = load_vocab_and_emoji(A_ , A_ )
_lowerCamelCase = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return len(self.raw_vocab )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def UpperCamelCase_ ( self , A_ ) -> Tuple:
"""simple docstring"""
return self.subword_tokenizer.tokenize(A_ , clean=self.do_clean_text )
def UpperCamelCase_ ( self , A_ ) -> List[str]:
"""simple docstring"""
return self.vocab.get(A_ , self.vocab.get(self.unk_token ) )
def UpperCamelCase_ ( self , A_ ) -> Dict:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(A_ )
def UpperCamelCase_ ( self , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = ''''''.join(A_ ).strip()
return out_string
def UpperCamelCase_ ( self , A_ ) -> List[int]:
"""simple docstring"""
_lowerCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(A_ , add_special_tokens=A_ ) + [self.eos_token_id] )
if len(A_ ) > self.model_max_length:
_lowerCamelCase = input_ids[-self.model_max_length :]
return input_ids
def UpperCamelCase_ ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
_lowerCamelCase = 0
if os.path.isdir(A_ ):
_lowerCamelCase = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
else:
_lowerCamelCase = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
)
_lowerCamelCase = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
)
with open(A_ , '''w''' , encoding='''utf-8''' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''' )
_lowerCamelCase = token_index
writer.write(''','''.join(A_ ) + '''\n''' )
index += 1
with open(A_ , '''w''' , encoding='''utf-8''' ) as writer:
json.dump(self.emoji , A_ )
return vocab_file, emoji_file
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , A_ , A_ , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = vocab # same as swe
_lowerCamelCase = ids_to_tokens # same as bpe
_lowerCamelCase = emoji
_lowerCamelCase = np.max([len(A_ ) for w in self.vocab.keys()] )
_lowerCamelCase = re.compile(r'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
_lowerCamelCase = re.compile(r'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
_lowerCamelCase = re.compile(r'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
_lowerCamelCase = re.compile(
r'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
_lowerCamelCase = re.compile(
r'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
_lowerCamelCase = re.compile(
r'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
_lowerCamelCase = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
_lowerCamelCase = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
_lowerCamelCase = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.ids_to_tokens )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.content_repattera.sub('''<URL>''' , A_ )
_lowerCamelCase = self.content_repattera.sub('''<EMAIL>''' , A_ )
_lowerCamelCase = self.content_repattera.sub('''<TEL>''' , A_ )
_lowerCamelCase = self.content_repattera.sub('''<DATE>''' , A_ )
_lowerCamelCase = self.content_repattera.sub('''<DATE>''' , A_ )
_lowerCamelCase = self.content_repattera.sub('''<PRICE>''' , A_ )
_lowerCamelCase = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
_lowerCamelCase = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''' )
return content
def UpperCamelCase_ ( self , A_ , A_=False ) -> Dict:
"""simple docstring"""
_lowerCamelCase = text.replace(''' ''' , '''<SP>''' )
_lowerCamelCase = text.replace(''' ''' , '''<SP>''' )
_lowerCamelCase = text.replace('''\r\n''' , '''<BR>''' )
_lowerCamelCase = text.replace('''\n''' , '''<BR>''' )
_lowerCamelCase = text.replace('''\r''' , '''<BR>''' )
_lowerCamelCase = text.replace('''\t''' , '''<TAB>''' )
_lowerCamelCase = text.replace('''—''' , '''ー''' )
_lowerCamelCase = text.replace('''−''' , '''ー''' )
for k, v in self.emoji["emoji"].items():
if k in text:
_lowerCamelCase = text.replace(A_ , A_ )
if clean:
_lowerCamelCase = self.clean_text(A_ )
def check_simbol(A_ ):
_lowerCamelCase = x.encode()
if len(A_ ) == 1 and len(A_ ) == 2:
_lowerCamelCase = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc_2a1 and c <= 0xc_2bf)
or (c >= 0xc_780 and c <= 0xc_783)
or (c >= 0xc_ab9 and c <= 0xc_bbf)
or (c >= 0xc_c80 and c <= 0xc_da2)
):
return True
return False
def checkuae(A_ ):
_lowerCamelCase = x.encode()
if len(A_ ) == 1 and len(A_ ) == 3:
_lowerCamelCase = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe28_080 and c <= 0xe2b_07f:
return True
return False
_lowerCamelCase = 0
_lowerCamelCase = []
while pos < len(A_ ):
_lowerCamelCase = min(len(A_ ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
_lowerCamelCase = [] # (token_id, token, pos)
for e in range(A_ , A_ , -1 ):
_lowerCamelCase = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(A_ ) > 2:
_lowerCamelCase = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(A_ ) > 0:
# the smallest token_id is adopted
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = sorted(A_ , key=lambda A_ : x[0] )[0]
result.append(A_ )
_lowerCamelCase = e
else:
_lowerCamelCase = pos + 1
_lowerCamelCase = text[pos:end]
if check_simbol(A_ ):
result.append('''<KIGOU>''' )
elif checkuae(A_ ):
result.append('''<U2000U2BFF>''' )
else:
for i in wd.encode('''utf-8''' ):
result.append('''<|byte%d|>''' % i )
_lowerCamelCase = end
return result
def UpperCamelCase_ ( self , A_ , A_="\n" ) -> Union[str, Any]:
"""simple docstring"""
        _lowerCamelCase = []
        _lowerCamelCase = []
        for index in A_ :
            _lowerCamelCase = self.ids_to_tokens[index][0]
            # buffer raw bytes until a non-byte token flushes them
            if word[:6] == "<|byte" and word[-2:] == "|>":
                byte_tokens.append(int(word[6:-2] ) )
            else:
                if len(A_ ) > 0:
                    words.append(bytearray(A_ ).decode('''utf-8''' , errors='''replace''' ) )
                    _lowerCamelCase = []
                if word[:7] == "<|emoji" and word[-2:] == "|>":
                    words.append(self.emoji['''emoji_inv'''][word] )
                elif word == "<SP>":
                    words.append(''' ''' )
                elif word == "<BR>":
                    words.append(A_ )
                elif word == "<TAB>":
                    words.append('''\t''' )
                elif word == "<BLOCK>":
                    words.append('''▀''' )
                elif word == "<KIGOU>":
                    words.append('''ǀ''' )
                elif word == "<U2000U2BFF>":
                    words.append('''‖''' )
                else:
                    words.append(A_ )
        if len(A_ ) > 0:
            words.append(bytearray(A_ ).decode('''utf-8''' , errors='''replace''' ) )
_lowerCamelCase = ''''''.join(A_ )
return text
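# --- Hedged sketch (added): a simplified version of the scanning loop in the
# subword tokenizer above. The real tokenizer collects every vocabulary match
# at a position and keeps the one with the smallest token id; this sketch
# simply takes the longest match for clarity.
def greedy_tokenize(text, vocab, max_len):
    pos, result = 0, []
    while pos < len(text):
        for end in range(min(len(text), pos + max_len), pos, -1):
            piece = text[pos:end]
            if piece in vocab:
                result.append(piece)
                pos = end
                break
        else:  # no vocabulary match: fall back to a single character
            result.append(text[pos])
            pos += 1
    return result

print(greedy_tokenize("unhappiness", {"un", "happi", "happiness", "ness"}, 9))
# ['un', 'happiness']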
| 719
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
snake_case__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_lowerCamelCase = '''lm_head'''
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
_lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = fairseq_model.state_dict()
_lowerCamelCase = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(__UpperCAmelCase )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , __UpperCAmelCase )
if "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase = '''weight'''
else:
_lowerCamelCase = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(F'Unused weights: {unused_weights}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase = UniSpeechConfig.from_pretrained(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechConfig()
if is_finetuned:
if dict_path:
_lowerCamelCase = Dictionary.load_from_json(__UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase = target_dict.pad_index
_lowerCamelCase = target_dict.bos_index
_lowerCamelCase = target_dict.eos_index
_lowerCamelCase = len(target_dict.symbols )
_lowerCamelCase = os.path.join(__UpperCAmelCase , '''vocab.json''' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
_lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase = 42
_lowerCamelCase = 43
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = WavaVecaPhonemeCTCTokenizer(
__UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCAmelCase , )
_lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
_lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
_lowerCamelCase = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
_lowerCamelCase = UniSpeechForCTC(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechForPreTraining(__UpperCAmelCase )
if is_finetuned:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowerCamelCase = model[0].eval()
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
hf_unispeech.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
snake_case__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
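# --- Hedged sketch (added): the wildcard key mapping used by the weight loader
# above. "*" in the HF key is replaced by the layer index parsed out of the
# fairseq parameter name.
mapping = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}
fairseq_name = "w2v_model.encoder.layers.3.self_attn.k_proj.weight"
for key, mapped_key in mapping.items():
    if key in fairseq_name:
        layer_index = fairseq_name.split(key)[0].split(".")[-2]
        print(mapped_key.replace("*", layer_index))  # encoder.layers.3.attention.k_proj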
| 638
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 'blip_2_vision_model'
def __init__( self , A_=14_08 , A_=61_44 , A_=39 , A_=16 , A_=2_24 , A_=14 , A_="gelu" , A_=0.00001 , A_=0.0 , A_=1E-1_0 , A_=True , **A_ , ) -> int:
"""simple docstring"""
super().__init__(**A_ )
_lowerCamelCase = hidden_size
_lowerCamelCase = intermediate_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = patch_size
_lowerCamelCase = image_size
_lowerCamelCase = initializer_range
_lowerCamelCase = attention_dropout
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = hidden_act
_lowerCamelCase = qkv_bias
@classmethod
def UpperCamelCase_ ( cls , A_ , **A_ ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(A_ )
_lowerCamelCase , _lowerCamelCase = cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
_lowerCamelCase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 'blip_2_qformer'
def __init__( self , A_=3_05_22 , A_=7_68 , A_=12 , A_=12 , A_=30_72 , A_="gelu" , A_=0.1 , A_=0.1 , A_=5_12 , A_=0.02 , A_=1E-1_2 , A_=0 , A_="absolute" , A_=2 , A_=14_08 , **A_ , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(pad_token_id=A_ , **A_ )
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = hidden_act
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = initializer_range
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = position_embedding_type
_lowerCamelCase = cross_attention_frequency
_lowerCamelCase = encoder_hidden_size
@classmethod
def UpperCamelCase_ ( cls , A_ , **A_ ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(A_ )
_lowerCamelCase , _lowerCamelCase = cls.get_config_dict(A_ , **A_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
_lowerCamelCase = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 'blip-2'
A_ = True
def __init__( self , A_=None , A_=None , A_=None , A_=32 , **A_ ) -> str:
"""simple docstring"""
super().__init__(**A_ )
if vision_config is None:
_lowerCamelCase = {}
logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' )
if qformer_config is None:
_lowerCamelCase = {}
logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )
if text_config is None:
_lowerCamelCase = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
_lowerCamelCase = BlipaVisionConfig(**A_ )
_lowerCamelCase = BlipaQFormerConfig(**A_ )
_lowerCamelCase = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
_lowerCamelCase = CONFIG_MAPPING[text_model_type](**A_ )
_lowerCamelCase = self.text_config.tie_word_embeddings
_lowerCamelCase = self.text_config.is_encoder_decoder
_lowerCamelCase = num_query_tokens
_lowerCamelCase = self.vision_config.hidden_size
_lowerCamelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCamelCase = 1.0
_lowerCamelCase = 0.02
@classmethod
def UpperCamelCase_ ( cls , A_ , A_ , A_ , **A_ , ) -> Tuple:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **A_ , )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = copy.deepcopy(self.__dict__ )
_lowerCamelCase = self.vision_config.to_dict()
_lowerCamelCase = self.qformer_config.to_dict()
_lowerCamelCase = self.text_config.to_dict()
_lowerCamelCase = self.__class__.model_type
return output
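# --- Hedged usage sketch (added): composing the composite config from its
# sub-configs. The class names below follow the public transformers API
# (Blip2Config, Blip2VisionConfig, Blip2QFormerConfig, OPTConfig); verify them
# against your installed version before relying on this.
from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

composed = Blip2Config.from_vision_qformer_text_configs(
    vision_config=Blip2VisionConfig(),
    qformer_config=Blip2QFormerConfig(),
    text_config=OPTConfig(),
)
print(composed.num_query_tokens)  # 32 by default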
| 720
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , *A_ , **A_ ) -> None:
"""simple docstring"""
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DPTImageProcessor instead.''' , A_ , )
super().__init__(*A_ , **A_ )
| 638
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCAmelCase__ ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCAmelCase__ ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , A_ = 3 , A_ = 3 , A_ = ("DownEncoderBlock2D",) , A_ = ("UpDecoderBlock2D",) , A_ = (64,) , A_ = 1 , A_ = "silu" , A_ = 3 , A_ = 32 , A_ = 2_56 , A_ = 32 , A_ = None , A_ = 0.18215 , A_ = "group" , ) -> Any:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
_lowerCamelCase = Encoder(
in_channels=A_ , out_channels=A_ , down_block_types=A_ , block_out_channels=A_ , layers_per_block=A_ , act_fn=A_ , norm_num_groups=A_ , double_z=A_ , )
_lowerCamelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels
_lowerCamelCase = nn.Convad(A_ , A_ , 1 )
_lowerCamelCase = VectorQuantizer(A_ , A_ , beta=0.25 , remap=A_ , sane_index_shape=A_ )
_lowerCamelCase = nn.Convad(A_ , A_ , 1 )
# pass init params to Decoder
_lowerCamelCase = Decoder(
in_channels=A_ , out_channels=A_ , up_block_types=A_ , block_out_channels=A_ , layers_per_block=A_ , act_fn=A_ , norm_num_groups=A_ , norm_type=A_ , )
@apply_forward_hook
def UpperCamelCase_ ( self , A_ , A_ = True ) -> VQEncoderOutput:
"""simple docstring"""
_lowerCamelCase = self.encoder(A_ )
_lowerCamelCase = self.quant_conv(A_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=A_ )
@apply_forward_hook
def UpperCamelCase_ ( self , A_ , A_ = False , A_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
# also go through quantization layer
if not force_not_quantize:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.quantize(A_ )
else:
_lowerCamelCase = h
_lowerCamelCase = self.post_quant_conv(A_ )
_lowerCamelCase = self.decoder(A_ , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=A_ )
def UpperCamelCase_ ( self , A_ , A_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
_lowerCamelCase = sample
_lowerCamelCase = self.encode(A_ ).latents
_lowerCamelCase = self.decode(A_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=A_ )
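# --- Hedged sketch (added): the nearest-codebook lookup at the heart of the
# VectorQuantizer used above. Straight-through gradients and the commitment
# loss are omitted; this shows only the quantization itself.
import torch

codebook = torch.randn(256, 4)  # (num_embeddings, embedding_dim)
latents = torch.randn(10, 4)    # flattened encoder outputs
indices = torch.cdist(latents, codebook).argmin(dim=1)  # nearest entry per row
quantized = codebook[indices]
print(quantized.shape)  # torch.Size([10, 4])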
| 721
|
import argparse
import json
import subprocess
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
_lowerCamelCase = subprocess.run(__UpperCAmelCase , shell=__UpperCAmelCase , stdout=subprocess.PIPE )
_lowerCamelCase = output.stdout.decode('''utf-8''' )
_lowerCamelCase = json.loads(__UpperCAmelCase )
_lowerCamelCase = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(__UpperCAmelCase )
# save the result so we can report them on Slack
with open('''offline_runners.txt''' , '''w''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > 0:
_lowerCamelCase = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
return values.split(''',''' )
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
snake_case__ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
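# --- Hedged sketch (added): the filtering step above on a canned payload, so
# the offline-runner logic can be checked without a GitHub token. The payload
# shape mirrors the /actions/runners API response.
import json

payload = json.loads(
    '{"runners": [{"name": "gpu-1", "status": "offline"}, {"name": "gpu-2", "status": "online"}]}'
)
print([r["name"] for r in payload["runners"] if r["status"] == "offline"])  # ['gpu-1']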
| 638
| 0
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase_ ( A_ ) -> List[str]:
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
raise NotImplementedError()
| 700
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 638
| 0
|
import numpy as np
def __magic_name__( __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def __magic_name__( __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(__UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
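# --- Hedged sanity check (added): SiLU(x) = x * sigmoid(x), with sigmoid
# staying strictly inside (0, 1).
import numpy as np

x = np.array([-2.0, 0.0, 2.0])
s = 1 / (1 + np.exp(-x))
print(s)      # [0.11920292 0.5        0.88079708]
print(x * s)  # SiLU: [-0.23840584  0.          1.76159416]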
| 701
|
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
_lowerCamelCase = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase = 0
while b > 0:
if b & 1:
_lowerCamelCase = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
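# --- Hedged sanity check (added): the shift-and-add idea above adds `a`
# whenever the low bit of `b` is set, then doubles `a` and halves `b`.
def binary_multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res

assert binary_multiply(13, 11) == 143
print(binary_multiply(2, 10))  # 20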
| 638
| 0
|
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
snake_case__ = 'bert-base-cased'
snake_case__ = 'google/pegasus-xsum'
snake_case__ = [' Sam ate lunch today.', 'Sams lunch ingredients.']
snake_case__ = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
snake_case__ = 'patrickvonplaten/t5-tiny-random'
snake_case__ = 'sshleifer/bart-tiny-random'
snake_case__ = 'sshleifer/tiny-mbart'
snake_case__ = 'sshleifer/tiny-marian-en-de'
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase = '''\n'''.join(__UpperCAmelCase )
Path(__UpperCAmelCase ).open('''w''' ).writelines(__UpperCAmelCase )
def __magic_name__( __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(__UpperCAmelCase , F'{split}.source' ) , __UpperCAmelCase )
_dump_articles(os.path.join(__UpperCAmelCase , F'{split}.target' ) , __UpperCAmelCase )
return tmp_dir
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = AutoTokenizer.from_pretrained(A_ )
_lowerCamelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_lowerCamelCase = max(len(tokenizer.encode(A_ ) ) for a in ARTICLES )
_lowerCamelCase = max(len(tokenizer.encode(A_ ) ) for a in SUMMARIES )
_lowerCamelCase = 4
_lowerCamelCase = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_lowerCamelCase , _lowerCamelCase = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
_lowerCamelCase = SeqaSeqDataset(
A_ , data_dir=A_ , type_path='''train''' , max_source_length=A_ , max_target_length=A_ , src_lang=A_ , tgt_lang=A_ , )
_lowerCamelCase = DataLoader(A_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(A_ , A_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_lowerCamelCase = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def UpperCamelCase_ ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = AutoTokenizer.from_pretrained(A_ )
_lowerCamelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_lowerCamelCase = max(len(tokenizer.encode(A_ ) ) for a in ARTICLES )
_lowerCamelCase = max(len(tokenizer.encode(A_ ) ) for a in SUMMARIES )
_lowerCamelCase = 4
_lowerCamelCase = LegacySeqaSeqDataset(
A_ , data_dir=A_ , type_path='''train''' , max_source_length=20 , max_target_length=A_ , )
_lowerCamelCase = DataLoader(A_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' )
_lowerCamelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_lowerCamelCase = tmp_dir.joinpath('''train.source''' ).open().readlines()
_lowerCamelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(A_ , A_ , 1_28 , A_ )
_lowerCamelCase = {x.name for x in tmp_dir.iterdir()}
_lowerCamelCase = {x.name for x in save_dir.iterdir()}
_lowerCamelCase = save_dir.joinpath('''train.source''' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(A_ ) < len(A_ )
assert len(A_ ) == 1
assert len(packed_examples[0] ) == sum(len(A_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self._get_dataset(max_len=64 )
_lowerCamelCase = 64
_lowerCamelCase = ds.make_dynamic_sampler(A_ , required_batch_size_multiple=A_ )
_lowerCamelCase = [len(A_ ) for x in batch_sampler]
assert len(set(A_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(A_ ) == len(A_ ) # no dropped or added examples
_lowerCamelCase = DataLoader(A_ , batch_sampler=A_ , collate_fn=ds.collate_fn , num_workers=2 )
_lowerCamelCase = []
_lowerCamelCase = []
for batch in data_loader:
_lowerCamelCase = batch['''input_ids'''].shape
_lowerCamelCase = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_lowerCamelCase = np.prod(batch['''input_ids'''].shape )
num_src_per_batch.append(A_ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(A_ )
assert num_src_per_batch[0] == max(A_ )
if failures:
raise AssertionError(F'too many tokens in {len(A_ )} batches' )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self._get_dataset(max_len=5_12 )
_lowerCamelCase = 2
_lowerCamelCase = ds.make_sortish_sampler(A_ , shuffle=A_ )
_lowerCamelCase = DataLoader(A_ , batch_size=A_ , collate_fn=ds.collate_fn , num_workers=2 )
_lowerCamelCase = DataLoader(A_ , batch_size=A_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=A_ )
_lowerCamelCase = tokenizer.pad_token_id
def count_pad_tokens(A_ , A_="input_ids" ):
return [batch[k].eq(A_ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(A_ , k='''labels''' ) ) < sum(count_pad_tokens(A_ , k='''labels''' ) )
assert sum(count_pad_tokens(A_ ) ) < sum(count_pad_tokens(A_ ) )
assert len(A_ ) == len(A_ )
def UpperCamelCase_ ( self , A_=10_00 , A_=1_28 ) -> Tuple:
"""simple docstring"""
if os.getenv('''USE_REAL_DATA''' , A_ ):
_lowerCamelCase = '''examples/seq2seq/wmt_en_ro'''
_lowerCamelCase = max_len * 2 * 64
if not Path(A_ ).joinpath('''train.len''' ).exists():
save_len_file(A_ , A_ )
else:
_lowerCamelCase = '''examples/seq2seq/test_data/wmt_en_ro'''
_lowerCamelCase = max_len * 4
save_len_file(A_ , A_ )
_lowerCamelCase = AutoTokenizer.from_pretrained(A_ )
_lowerCamelCase = SeqaSeqDataset(
A_ , data_dir=A_ , type_path='''train''' , max_source_length=A_ , max_target_length=A_ , n_obs=A_ , )
return ds, max_tokens, tokenizer
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self._get_dataset()
_lowerCamelCase = set(DistributedSortishSampler(A_ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=A_ ) )
_lowerCamelCase = set(DistributedSortishSampler(A_ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=A_ ) )
assert idsa.intersection(A_ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def UpperCamelCase_ ( self , A_ ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = AutoTokenizer.from_pretrained(A_ , use_fast=A_ )
if tok_name == MBART_TINY:
_lowerCamelCase = SeqaSeqDataset(
A_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
_lowerCamelCase = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_lowerCamelCase = SeqaSeqDataset(
A_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
_lowerCamelCase = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(A_ ) == 1 if tok_name == BART_TINY else len(A_ ) == 0
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
_lowerCamelCase = model.config
_lowerCamelCase = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
_lowerCamelCase = MBartConfig(
is_decoder=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , add_cross_attention=__UpperCAmelCase , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__UpperCAmelCase , add_final_layer_norm=__UpperCAmelCase , )
return encoder_config, decoder_config
def __magic_name__( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if "encoder.model" in name:
_lowerCamelCase = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
_lowerCamelCase = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
_lowerCamelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_lowerCamelCase = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
_lowerCamelCase = '''encoder.''' + name
if "attn.proj" in name:
_lowerCamelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
_lowerCamelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_lowerCamelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_lowerCamelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_lowerCamelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_lowerCamelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
_lowerCamelCase = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
_lowerCamelCase = '''encoder.layernorm.bias'''
return name
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCamelCase = orig_state_dict.pop(__UpperCAmelCase )
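# the original checkpoint stores query, key and value as one fused "qkv" matrix;
# the block below splits it into thirds so each projection can be loaded separately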
if "qkv" in key:
_lowerCamelCase = key.split('''.''' )
_lowerCamelCase = int(key_split[3] )
_lowerCamelCase = int(key_split[5] )
_lowerCamelCase = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_lowerCamelCase = val[:dim, :]
_lowerCamelCase = val[dim : dim * 2, :]
_lowerCamelCase = val[-dim:, :]
else:
_lowerCamelCase = val[:dim]
_lowerCamelCase = val[dim : dim * 2]
_lowerCamelCase = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
_lowerCamelCase = val
return orig_state_dict
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=False ) -> int:
'''simple docstring'''
_lowerCamelCase = DonutModel.from_pretrained(__UpperCAmelCase ).eval()
# load HuggingFace model
_lowerCamelCase , _lowerCamelCase = get_configs(__UpperCAmelCase )
_lowerCamelCase = DonutSwinModel(__UpperCAmelCase )
_lowerCamelCase = MBartForCausalLM(__UpperCAmelCase )
_lowerCamelCase = VisionEncoderDecoderModel(encoder=__UpperCAmelCase , decoder=__UpperCAmelCase )
model.eval()
_lowerCamelCase = original_model.state_dict()
_lowerCamelCase = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
# verify results on scanned document
_lowerCamelCase = load_dataset('''hf-internal-testing/example-documents''' )
_lowerCamelCase = dataset['''test'''][0]['''image'''].convert('''RGB''' )
_lowerCamelCase = XLMRobertaTokenizerFast.from_pretrained(__UpperCAmelCase , from_slow=__UpperCAmelCase )
_lowerCamelCase = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
_lowerCamelCase = DonutProcessor(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_lowerCamelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_lowerCamelCase = '''When is the coffee break?'''
_lowerCamelCase = task_prompt.replace('''{user_input}''' , __UpperCAmelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_lowerCamelCase = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_lowerCamelCase = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
_lowerCamelCase = '''<s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_lowerCamelCase = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_lowerCamelCase = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
_lowerCamelCase = original_model.decoder.tokenizer(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors='''pt''' )[
'''input_ids'''
]
_lowerCamelCase = original_model.encoder.model.patch_embed(__UpperCAmelCase )
_lowerCamelCase , _lowerCamelCase = model.encoder.embeddings(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
# verify encoder hidden states
_lowerCamelCase = original_model.encoder(__UpperCAmelCase )
_lowerCamelCase = model.encoder(__UpperCAmelCase ).last_hidden_state
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
# verify decoder hidden states
_lowerCamelCase = original_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).logits
_lowerCamelCase = model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ).logits
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
snake_case__ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
A_ = 'pixel_values'
A_ = False
A_ = TimmBackboneConfig
def __init__( self , A_ , **A_ ) -> Any:
"""simple docstring"""
requires_backends(self , '''timm''' )
super().__init__(A_ )
_lowerCamelCase = config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
if hasattr(A_ , '''out_features''' ) and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
_lowerCamelCase = getattr(A_ , '''use_pretrained_backbone''' , A_ )
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
# We just take the final layer by default. This matches the default for the transformers models.
_lowerCamelCase = config.out_indices if getattr(A_ , '''out_indices''' , A_ ) is not None else (-1,)
_lowerCamelCase = timm.create_model(
config.backbone , pretrained=A_ , features_only=config.features_only , in_chans=config.num_channels , out_indices=A_ , **A_ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_lowerCamelCase = self._backbone.return_layers
_lowerCamelCase = {layer['''module''']: str(A_ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(A_ )
@classmethod
def UpperCamelCase_ ( cls , A_ , *A_ , **A_ ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''vision''', '''timm'''] )
from ...models.timm_backbone import TimmBackboneConfig
_lowerCamelCase = kwargs.pop('''config''' , TimmBackboneConfig() )
_lowerCamelCase = kwargs.pop('''use_timm_backbone''' , A_ )
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''' )
_lowerCamelCase = kwargs.pop('''num_channels''' , config.num_channels )
_lowerCamelCase = kwargs.pop('''features_only''' , config.features_only )
_lowerCamelCase = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone )
_lowerCamelCase = kwargs.pop('''out_indices''' , config.out_indices )
_lowerCamelCase = TimmBackboneConfig(
backbone=A_ , num_channels=A_ , features_only=A_ , use_pretrained_backbone=A_ , out_indices=A_ , )
return super()._from_config(A_ , **A_ )
def UpperCamelCase_ ( self , A_ ) -> Any:
"""simple docstring"""
pass
def UpperCamelCase_ ( self , A_ , A_=None , A_=None , A_=None , **A_ ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
"""simple docstring"""
_lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCamelCase = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_lowerCamelCase = self._all_layers
_lowerCamelCase = self._backbone(A_ , **A_ )
_lowerCamelCase = self._return_layers
_lowerCamelCase = tuple(hidden_states[i] for i in self.out_indices )
else:
_lowerCamelCase = self._backbone(A_ , **A_ )
_lowerCamelCase = None
_lowerCamelCase = tuple(A_ )
_lowerCamelCase = tuple(A_ ) if hidden_states is not None else None
if not return_dict:
_lowerCamelCase = (feature_maps,)
if output_hidden_states:
_lowerCamelCase = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=A_ , hidden_states=A_ , attentions=A_ )
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd (https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd)
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
snake_case__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for attribute in key.split('''.''' ):
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
_lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
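# "weight_g" / "weight_v" are the magnitude and direction tensors created by
# PyTorch weight normalization; copy each one onto the matching module attribute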
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = fairseq_model.state_dict()
_lowerCamelCase = hf_model.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(__UpperCAmelCase )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , __UpperCAmelCase )
if "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name and "relative_attention_bias" not in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase = '''weight'''
else:
_lowerCamelCase = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(F'Unused weights: {unused_weights}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
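# a norm follows every conv layer when layer norm is used, but only the first
# conv layer (layer_id == 0) when group norm is used - hence the condition below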
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ):
'''simple docstring'''
_lowerCamelCase = torch.load(__UpperCAmelCase )
_lowerCamelCase = WavLMConfigOrig(checkpoint['''cfg'''] )
_lowerCamelCase = WavLMOrig(__UpperCAmelCase )
model.load_state_dict(checkpoint['''model'''] )
model.eval()
if config_path is not None:
_lowerCamelCase = WavLMConfig.from_pretrained(__UpperCAmelCase )
else:
_lowerCamelCase = WavLMConfig()
_lowerCamelCase = WavLMModel(__UpperCAmelCase )
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase )
hf_wavlm.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
snake_case__ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , A_ = 6_55_36 , A_ = None , A_ = 2 , A_ = 2 , A_ = 0 , A_ = "fourier" , A_ = True , A_ = False , A_ = 0.0 , A_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A_ = "UNetMidBlock1D" , A_ = None , A_ = (32, 32, 64) , A_ = None , A_ = 8 , A_ = 1 , A_ = False , ) -> Dict:
"""simple docstring"""
super().__init__()
_lowerCamelCase = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=A_ , log=A_ , flip_sin_to_cos=A_ )
_lowerCamelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=A_ , downscale_freq_shift=A_ )
_lowerCamelCase = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase = block_out_channels[0] * 4
_lowerCamelCase = TimestepEmbedding(
in_channels=A_ , time_embed_dim=A_ , act_fn=A_ , out_dim=block_out_channels[0] , )
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
# down
_lowerCamelCase = in_channels
for i, down_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_down_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(A_ )
# mid
_lowerCamelCase = get_mid_block(
A_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A_ , add_downsample=A_ , )
# up
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase = out_channels
else:
_lowerCamelCase = block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = (
reversed_block_out_channels[i + 1] if i < len(A_ ) - 1 else final_upsample_channels
)
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_up_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(A_ )
_lowerCamelCase = output_channel
# out
_lowerCamelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_lowerCamelCase = get_out_block(
out_block_type=A_ , num_groups_out=A_ , embed_dim=block_out_channels[0] , out_channels=A_ , act_fn=A_ , fc_dim=block_out_channels[-1] // 4 , )
def UpperCamelCase_ ( self , A_ , A_ , A_ = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
_lowerCamelCase = timestep
if not torch.is_tensor(A_ ):
_lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
_lowerCamelCase = timesteps[None].to(sample.device )
_lowerCamelCase = self.time_proj(A_ )
if self.config.use_timestep_embedding:
_lowerCamelCase = self.time_mlp(A_ )
else:
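# no timestep embedding MLP: broadcast the raw projection across the sequence
# length so it lines up with the sample per position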
_lowerCamelCase = timestep_embed[..., None]
_lowerCamelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_lowerCamelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_lowerCamelCase = ()
for downsample_block in self.down_blocks:
_lowerCamelCase , _lowerCamelCase = downsample_block(hidden_states=A_ , temb=A_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCamelCase = self.mid_block(A_ , A_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_lowerCamelCase = down_block_res_samples[-1:]
_lowerCamelCase = down_block_res_samples[:-1]
_lowerCamelCase = upsample_block(A_ , res_hidden_states_tuple=A_ , temb=A_ )
# 5. post-process
if self.out_block:
_lowerCamelCase = self.out_block(A_ , A_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=A_ )
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
return 1.0 / (1.0 + np.exp(-_outputs ))
def __magic_name__( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = np.max(_outputs , axis=-1 , keepdims=__UpperCAmelCase )
_lowerCamelCase = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__UpperCAmelCase )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 'sigmoid'
A_ = 'softmax'
A_ = 'none'
@add_end_docstrings(
__lowercase , R'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = False
A_ = ClassificationFunction.NONE
def __init__( self , **A_ ) -> str:
"""simple docstring"""
super().__init__(**A_ )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def UpperCamelCase_ ( self , A_=None , A_=None , A_="" , **A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = tokenizer_kwargs
_lowerCamelCase = {}
if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
_lowerCamelCase = self.model.config.return_all_scores
if isinstance(A_ , A_ ) or top_k is None:
_lowerCamelCase = top_k
_lowerCamelCase = False
elif return_all_scores is not None:
warnings.warn(
'''`return_all_scores` is now deprecated, if you want similar functionality use `top_k=None` instead of'''
''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , A_ , )
if return_all_scores:
_lowerCamelCase = None
else:
_lowerCamelCase = 1
if isinstance(A_ , A_ ):
_lowerCamelCase = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
_lowerCamelCase = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *A_ , **A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = super().__call__(*A_ , **A_ )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_lowerCamelCase = '''top_k''' not in kwargs
if isinstance(args[0] , A_ ) and _legacy:
# This pipeline is odd, and returns a list when a single item is run
return [result]
else:
return result
def UpperCamelCase_ ( self , A_ , **A_ ) -> Dict[str, GenericTensor]:
"""simple docstring"""
_lowerCamelCase = self.framework
if isinstance(A_ , A_ ):
return self.tokenizer(**A_ , return_tensors=A_ , **A_ )
elif isinstance(A_ , A_ ) and len(A_ ) == 1 and isinstance(inputs[0] , A_ ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=A_ , **A_ )
elif isinstance(A_ , A_ ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
return self.tokenizer(A_ , return_tensors=A_ , **A_ )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
return self.model(**A_ )
def UpperCamelCase_ ( self , A_ , A_=None , A_=1 , A_=True ) -> List[Any]:
"""simple docstring"""
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
_lowerCamelCase = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
_lowerCamelCase = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
_lowerCamelCase = self.model.config.function_to_apply
else:
_lowerCamelCase = ClassificationFunction.NONE
_lowerCamelCase = model_outputs['''logits'''][0]
_lowerCamelCase = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
_lowerCamelCase = sigmoid(A_ )
elif function_to_apply == ClassificationFunction.SOFTMAX:
_lowerCamelCase = softmax(A_ )
elif function_to_apply == ClassificationFunction.NONE:
_lowerCamelCase = outputs
else:
raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
_lowerCamelCase = [
{'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(A_ )
]
if not _legacy:
dict_scores.sort(key=lambda A_ : A_["score"] , reverse=A_ )
if top_k is not None:
_lowerCamelCase = dict_scores[:top_k]
return dict_scores
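# Minimal usage sketch (an addition, not part of the original file; the model id
# is only an example): this class is normally reached through `transformers.pipeline`:
# from transformers import pipeline
# classifier = pipeline('text-classification', model='distilbert-base-uncased-finetuned-sst-2-english')
# classifier('I love this!', top_k=None)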
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
snake_case__ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
snake_case__ = [0, 25, 50]
snake_case__ = [25, 50, 75]
snake_case__ = fuzz.membership.trimf(X, abca)
snake_case__ = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
snake_case__ = np.ones(75)
snake_case__ = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
snake_case__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
snake_case__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = 1 - µA(x)
snake_case__ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
snake_case__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
snake_case__ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
snake_case__ = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
snake_case__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
snake_case__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
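# Neither composition is implemented in the original script; below is a minimal
# illustrative sketch (an addition, not original code) using two small, assumed
# fuzzy relation matrices R1 (on X x Y) and R2 (on Y x Z).
R1 = np.array([[0.6, 0.3], [0.2, 0.9]])  # assumed relation on X x Y
R2 = np.array([[1.0, 0.5, 0.3], [0.8, 0.4, 0.7]])  # assumed relation on Y x Z
# max-min composition: (R1 o R2)[i, k] = max_j min(R1[i, j], R2[j, k])
max_min_composition = np.max(np.minimum(R1[:, :, np.newaxis], R2[np.newaxis, :, :]), axis=1)
# max-product composition: (R1 * R2)[i, k] = max_j (R1[i, j] * R2[j, k])
max_product_composition = np.max(R1[:, :, np.newaxis] * R2[np.newaxis, :, :], axis=1)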
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
snake_case__ = HfArgumentParser(InitializationArguments)
snake_case__ = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
snake_case__ = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
snake_case__ = {
'vocab_size': len(tokenizer),
'scale_attn_by_inverse_layer_idx': True,
'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
snake_case__ = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
snake_case__ = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger()
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = len(list(m.modules() ) ) == 1 or isinstance(A_ , nn.Convad ) or isinstance(A_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(A_ )
def __call__( self , A_ ) -> Tuple:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A_ )
[x.remove() for x in self.handles]
return self
@property
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda A_ : len(list(A_.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = 42
A_ = 0
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def __call__( self , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = Tracker(self.dest )(A_ ).parametrized
_lowerCamelCase = Tracker(self.src )(A_ ).parametrized
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.src_skip , A_ ) )
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.dest_skip , A_ ) )
if len(A_ ) != len(A_ ):
raise Exception(
F'Numbers of operations are different. Source module has {len(A_ )} operations while'
F' destination module has {len(A_ )}.' )
for dest_m, src_m in zip(A_ , A_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'Transferred from={src_m} to={dest_m}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
print(F'Converting {name}...' )
with torch.no_grad():
_lowerCamelCase = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase ).eval()
_lowerCamelCase = ResNetForImageClassification(__UpperCAmelCase ).eval()
_lowerCamelCase = ModuleTransfer(src=__UpperCAmelCase , dest=__UpperCAmelCase )
_lowerCamelCase = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCAmelCase )
assert torch.allclose(from_model(__UpperCAmelCase ) , our_model(__UpperCAmelCase ).logits ), "The model logits don't match the original one."
_lowerCamelCase = F'resnet{"-".join(name.split("resnet" ) )}'
print(__UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__UpperCAmelCase , )
# we can use the convnext one
_lowerCamelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__UpperCAmelCase , )
print(F'Pushed {checkpoint_name}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = 1000
_lowerCamelCase = (1, num_labels)
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = num_labels
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
_lowerCamelCase = partial(__UpperCAmelCase , num_labels=__UpperCAmelCase , idalabel=__UpperCAmelCase , labelaid=__UpperCAmelCase )
_lowerCamelCase = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(__UpperCAmelCase , names_to_config[model_name] , __UpperCAmelCase , __UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
snake_case__ = parser.parse_args()
snake_case__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
UpperCamelCase__ = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_lowerCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
_lowerCamelCase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_lowerCamelCase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(self.tmpdirname , A_ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
# load decoder from hub
_lowerCamelCase = '''hf-internal-testing/ngram-beam-search-decoder'''
def UpperCamelCase_ ( self , **A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(A_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> Optional[Any]:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> int:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# load the processor back while overriding decoder parameters (no error expected)
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(A_ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=A_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = feature_extractor(A_ , return_tensors='''np''' )
_lowerCamelCase = processor(A_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = '''This is a test string'''
_lowerCamelCase = processor(text=A_ )
_lowerCamelCase = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self , A_=(2, 10, 16) , A_=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(A_ )
return np.random.rand(*A_ )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_lowerCamelCase = processor.decode(A_ )
_lowerCamelCase = decoder.decode_beams(A_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_lowerCamelCase = processor.batch_decode(A_ )
else:
with get_context(A_ ).Pool() as pool:
_lowerCamelCase = processor.batch_decode(A_ , A_ )
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as p:
_lowerCamelCase = decoder.decode_beams_batch(A_ , A_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A_ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(A_ , decoded_processor.logit_score )
self.assertListEqual(A_ , decoded_processor.lm_score )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 15
_lowerCamelCase = -20.0
_lowerCamelCase = -4.0
_lowerCamelCase = processor.batch_decode(
A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][2] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , A_ )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , A_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , A_ , atol=1E-3 ) )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 2.0
_lowerCamelCase = 5.0
_lowerCamelCase = -20.0
_lowerCamelCase = True
_lowerCamelCase = processor.batch_decode(
A_ , alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
decoder.reset_params(
alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = os.listdir(A_ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = processor_wavaveca(A_ , return_tensors='''np''' )
_lowerCamelCase = processor_auto(A_ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor_wavaveca.batch_decode(A_ )
_lowerCamelCase = processor_auto.batch_decode(A_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def UpperCamelCase_ ( A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = [d[key] for d in offsets]
return retrieved_list
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()[0]
_lowerCamelCase = processor.decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor.batch_decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
import torch
_lowerCamelCase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=A_ )
_lowerCamelCase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_lowerCamelCase = iter(A_ )
_lowerCamelCase = next(A_ )
_lowerCamelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_lowerCamelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_lowerCamelCase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_lowerCamelCase = model(A_ ).logits.cpu().numpy()
_lowerCamelCase = processor.decode(logits[0] , output_word_offsets=A_ )
_lowerCamelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_lowerCamelCase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_lowerCamelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , A_ )
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , output.text )
# output times
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''start_time''' ) )
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''end_time''' ) )
# fmt: off
_lowerCamelCase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_lowerCamelCase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
| 638
| 0
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
snake_case__ = logging.get_logger(__name__)
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
def run_func(__UpperCAmelCase ):
@wraps(__UpperCAmelCase )
def run_in_eager_mode(*__UpperCAmelCase , **__UpperCAmelCase ):
return func(*__UpperCAmelCase , **__UpperCAmelCase )
@wraps(__UpperCAmelCase )
@tf.function(experimental_compile=__UpperCAmelCase )
def run_in_graph_mode(*__UpperCAmelCase , **__UpperCAmelCase ):
return func(*__UpperCAmelCase , **__UpperCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
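# A minimal usage sketch of the decorator factory above (the wrapped function and
# its inputs are illustrative, not taken from this file):
#
#     @run_with_tf_optimizations(do_eager_mode=True, use_xla=False)
#     def forward():
#         return model(input_ids, training=False)
#
# With do_eager_mode=True the call runs as plain Python; otherwise it is wrapped in
# tf.function (optionally XLA-compiled when use_xla is passed as experimental_compile).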
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> ["tf.Tensor"]:
'''simple docstring'''
_lowerCamelCase = random.Random()
_lowerCamelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(__UpperCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
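# For example (assuming the positional order batch_size, sequence_length, vocab_size
# used at the call sites below), random_input_ids(8, 128, 32_000) returns an
# (8, 128) integer tensor of token ids drawn uniformly from [0, vocab_size).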
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
A_ = 42
A_ = 'TensorFlow'
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
return tf.__version__
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> float:
"""simple docstring"""
_lowerCamelCase = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
_lowerCamelCase = self._prepare_inference_func(A_ , A_ , A_ )
return self._measure_speed(_inference )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> float:
"""simple docstring"""
_lowerCamelCase = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
_lowerCamelCase = self._prepare_train_func(A_ , A_ , A_ )
return self._measure_speed(_train )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> [Memory, Optional[MemorySummary]]:
"""simple docstring"""
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , A_ )
_lowerCamelCase = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
_lowerCamelCase = self._prepare_inference_func(A_ , A_ , A_ )
return self._measure_memory(_inference )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> [Memory, Optional[MemorySummary]]:
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , A_ )
_lowerCamelCase = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
_lowerCamelCase = self._prepare_train_func(A_ , A_ , A_ )
return self._measure_memory(_train )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Callable[[], None]:
"""simple docstring"""
_lowerCamelCase = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
_lowerCamelCase = (
hasattr(A_ , '''architectures''' )
and isinstance(config.architectures , A_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCamelCase = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCamelCase = __import__('''transformers''' , fromlist=[model_class] )
_lowerCamelCase = getattr(A_ , A_ )
_lowerCamelCase = model_cls(A_ )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
_lowerCamelCase = TF_MODEL_MAPPING[config.__class__](A_ )
# encoder-decoder has vocab size saved differently
_lowerCamelCase = config.vocab_size if hasattr(A_ , '''vocab_size''' ) else config.encoder.vocab_size
_lowerCamelCase = random_input_ids(A_ , A_ , A_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(A_ , decoder_input_ids=A_ , training=A_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(A_ , training=A_ )
_lowerCamelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Callable[[], None]:
"""simple docstring"""
_lowerCamelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
_lowerCamelCase = (
hasattr(A_ , '''architectures''' )
and isinstance(config.architectures , A_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCamelCase = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCamelCase = __import__('''transformers''' , fromlist=[model_class] )
_lowerCamelCase = getattr(A_ , A_ )
_lowerCamelCase = model_cls(A_ )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
_lowerCamelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](A_ )
# encoder-decoder has vocab size saved differently
_lowerCamelCase = config.vocab_size if hasattr(A_ , '''vocab_size''' ) else config.encoder.vocab_size
_lowerCamelCase = random_input_ids(A_ , A_ , A_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_lowerCamelCase = model(A_ , decoder_input_ids=A_ , labels=A_ , training=A_ )[0]
_lowerCamelCase = tf.gradients(A_ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_lowerCamelCase = model(A_ , labels=A_ , training=A_ )[0]
_lowerCamelCase = tf.gradients(A_ , model.trainable_variables )
return gradients
_lowerCamelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase_ ( self , A_ ) -> float:
"""simple docstring"""
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model an additional 5 times to stabilize compilation for tpu/xla
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' )
timeit.repeat(A_ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_lowerCamelCase = timeit.repeat(
A_ , repeat=self.args.repeat , number=10 , )
return min(A_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
def UpperCamelCase_ ( self , A_ ) -> [Memory, MemorySummary]:
"""simple docstring"""
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''' )
_lowerCamelCase = start_memory_tracing('''transformers''' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''' )
_lowerCamelCase = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''' )
# init nvml
nvml.nvmlInit()
func()
_lowerCamelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_lowerCamelCase = nvml.nvmlDeviceGetMemoryInfo(A_ )
_lowerCamelCase = meminfo.used
_lowerCamelCase = Memory(A_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''' )
_lowerCamelCase = None
else:
_lowerCamelCase = measure_peak_memory_cpu(A_ )
_lowerCamelCase = Memory(A_ ) if isinstance(A_ , A_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
_lowerCamelCase = stop_memory_tracing(A_ )
if memory is None:
_lowerCamelCase = summary.total
else:
_lowerCamelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
return "N/A", None
| 709
|
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> bool:
'''simple docstring'''
_lowerCamelCase = len(__UpperCAmelCase )
_lowerCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # a sum of zero (0) can always be formed by taking no elements,
    # hence True/1 for every prefix of arr
for i in range(arr_len + 1 ):
_lowerCamelCase = True
    # a non-zero sum cannot be formed from an empty set, hence False
for i in range(1 , required_sum + 1 ):
_lowerCamelCase = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                # current element is larger than the target sum: inherit the result without it
                _lowerCamelCase = subset[i - 1][j]
            if arr[i - 1] <= j:
                # either skip the current element, or take it and reduce the target sum
                _lowerCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
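# A small worked example for the function above (the helper name is auto-generated
# here): for arr = [3, 34, 4, 12, 5, 2] and required_sum = 9 the table ends True,
# since 4 + 5 == 9; for required_sum = 30 no subset matches, so the result is False.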
if __name__ == "__main__":
import doctest
doctest.testmod()
| 638
| 0
|
from __future__ import annotations
import queue
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = data
_lowerCamelCase = None
_lowerCamelCase = None
def __magic_name__( ) -> TreeNode:
'''simple docstring'''
print('''\n********Press N to stop entering at any point of time********\n''' )
_lowerCamelCase = input('''Enter the value of the root node: ''' ).strip().lower()
_lowerCamelCase = queue.Queue()
_lowerCamelCase = TreeNode(int(__UpperCAmelCase ) )
q.put(__UpperCAmelCase )
while not q.empty():
_lowerCamelCase = q.get()
_lowerCamelCase = F'Enter the left node of {node_found.data}: '
_lowerCamelCase = input(__UpperCAmelCase ).strip().lower() or '''n'''
if check == "n":
return tree_node
_lowerCamelCase = TreeNode(int(__UpperCAmelCase ) )
_lowerCamelCase = left_node
q.put(__UpperCAmelCase )
_lowerCamelCase = F'Enter the right node of {node_found.data}: '
_lowerCamelCase = input(__UpperCAmelCase ).strip().lower() or '''n'''
if check == "n":
return tree_node
_lowerCamelCase = TreeNode(int(__UpperCAmelCase ) )
_lowerCamelCase = right_node
q.put(__UpperCAmelCase )
    raise  # for type checkers: the loop above always returns a tree
def __magic_name__( __UpperCAmelCase ) -> None:
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
print(node.data , end=''',''' )
pre_order(node.left )
pre_order(node.right )
def __magic_name__( __UpperCAmelCase ) -> None:
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
in_order(node.left )
print(node.data , end=''',''' )
in_order(node.right )
def __magic_name__( __UpperCAmelCase ) -> None:
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=''',''' )
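# For example, on the complete tree 1 -> (2, 3), 2 -> (4, 5), 3 -> (6, 7) the three
# recursive traversals above print:
#   pre order:  1,2,4,5,3,6,7,
#   in order:   4,2,5,1,6,3,7,
#   post order: 4,5,2,6,7,3,1,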
def __magic_name__( __UpperCAmelCase ) -> None:
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
_lowerCamelCase = queue.Queue()
q.put(__UpperCAmelCase )
while not q.empty():
_lowerCamelCase = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def __magic_name__( __UpperCAmelCase ) -> None:
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
_lowerCamelCase = queue.Queue()
q.put(__UpperCAmelCase )
while not q.empty():
_lowerCamelCase = []
while not q.empty():
_lowerCamelCase = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__UpperCAmelCase )
def __magic_name__( __UpperCAmelCase ) -> None:
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
_lowerCamelCase = []
_lowerCamelCase = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=''',''' )
stack.append(__UpperCAmelCase )
_lowerCamelCase = n.left
# end of while means current node doesn't have left child
_lowerCamelCase = stack.pop()
# start to traverse its right child
_lowerCamelCase = n.right
def __magic_name__( __UpperCAmelCase ) -> None:
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
_lowerCamelCase = []
_lowerCamelCase = node
while n or stack:
while n:
stack.append(__UpperCAmelCase )
_lowerCamelCase = n.left
_lowerCamelCase = stack.pop()
print(n.data , end=''',''' )
_lowerCamelCase = n.right
def __magic_name__( __UpperCAmelCase ) -> None:
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
_lowerCamelCase , _lowerCamelCase = [], []
_lowerCamelCase = node
stacka.append(__UpperCAmelCase )
while stacka: # to find the reversed order of post order, store it in stack2
_lowerCamelCase = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__UpperCAmelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=''',''' )
def __magic_name__( __UpperCAmelCase = "" , __UpperCAmelCase=50 , __UpperCAmelCase="*" ) -> str:
'''simple docstring'''
if not s:
return "\n" + width * char
_lowerCamelCase , _lowerCamelCase = divmod(width - len(__UpperCAmelCase ) - 2 , 2 )
return F'{left * char} {s} {(left + extra) * char}'
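# For example, prompt('Pre Order Traversal') pads the title to width 50 with '*'
# characters on both sides (one extra on the right when the leftover is odd);
# prompt() with no arguments returns a newline followed by 50 '*' characters.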
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
snake_case__ = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
| 710
|
from typing import List
import numpy as np
def __magic_name__( __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = {key: len(__UpperCAmelCase ) for key, value in gen_kwargs.items() if isinstance(__UpperCAmelCase , __UpperCAmelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
_lowerCamelCase = max(lists_lengths.values() , default=0 )
return max(1 , __UpperCAmelCase )
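# For example, gen_kwargs = {'files': ['a', 'b', 'c'], 'split': 'train'} has 3 shards
# (the length of its single list); with no list-valued entries the function returns 1.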
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[range]:
'''simple docstring'''
_lowerCamelCase = []
for group_idx in range(__UpperCAmelCase ):
_lowerCamelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_lowerCamelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_lowerCamelCase = range(__UpperCAmelCase , start + num_shards_to_add )
shards_indices_per_group.append(__UpperCAmelCase )
return shards_indices_per_group
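# For example, _distribute_shards(num_shards=5, max_num_jobs=2) returns
# [range(0, 3), range(3, 5)]: earlier groups absorb the remainder shards.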
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[dict]:
'''simple docstring'''
_lowerCamelCase = _number_of_shards_in_gen_kwargs(__UpperCAmelCase )
if num_shards == 1:
return [dict(__UpperCAmelCase )]
else:
_lowerCamelCase = _distribute_shards(num_shards=__UpperCAmelCase , max_num_jobs=__UpperCAmelCase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(__UpperCAmelCase ) )
]
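# For example, splitting {'files': ['a', 'b', 'c', 'd']} over max_num_jobs=2 yields
# [{'files': ['a', 'b']}, {'files': ['c', 'd']}]; non-list values are copied into
# every group unchanged.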
def __magic_name__( __UpperCAmelCase ) -> dict:
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , __UpperCAmelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
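# For example, merging [{'files': ['a'], 'n': 1}, {'files': ['b', 'c'], 'n': 1}] gives
# {'files': ['a', 'b', 'c'], 'n': 1}: lists are concatenated in order, while scalar
# values are taken from the first dict.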
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> dict:
'''simple docstring'''
_lowerCamelCase = {len(__UpperCAmelCase ) for value in gen_kwargs.values() if isinstance(__UpperCAmelCase , __UpperCAmelCase )}
_lowerCamelCase = {}
for size in list_sizes:
_lowerCamelCase = list(range(__UpperCAmelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_lowerCamelCase = dict(__UpperCAmelCase )
for key, value in shuffled_kwargs.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_lowerCamelCase = [value[i] for i in indices_per_size[len(__UpperCAmelCase )]]
return shuffled_kwargs
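# For example, with gen_kwargs = {'files': ['a', 'b', 'c'], 'split': 'train'} only the
# list under 'files' is reordered (all lists of the same length share one permutation);
# scalar values such as 'split' pass through untouched.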
| 638
| 0
|
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> bool:
'''simple docstring'''
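    # 1. Validate that current and next vertex are connected in graph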
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> bool:
'''simple docstring'''
if curr_ind == len(__UpperCAmelCase ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(__UpperCAmelCase ) ):
if valid_connection(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
# Insert current vertex into path as next transition
_lowerCamelCase = next_ver
# Validate created path
if util_hamilton_cycle(__UpperCAmelCase , __UpperCAmelCase , curr_ind + 1 ):
return True
# Backtrack
_lowerCamelCase = -1
return False
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase = 0 ) -> list[int]:
'''simple docstring'''
_lowerCamelCase = [-1] * (len(__UpperCAmelCase ) + 1)
# initialize start and end of path with starting index
_lowerCamelCase = _lowerCamelCase = start_index
    # evaluate; if a Hamiltonian cycle is found return the path, otherwise return an empty list
return path if util_hamilton_cycle(__UpperCAmelCase , __UpperCAmelCase , 1 ) else []
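# A small worked example for the search above (the public helper is auto-named here):
# for the 5-vertex graph
#     [[0, 1, 0, 1, 0],
#      [1, 0, 1, 1, 1],
#      [0, 1, 0, 0, 1],
#      [1, 1, 0, 0, 1],
#      [0, 1, 1, 1, 0]]
# the returned Hamiltonian cycle is [0, 1, 2, 4, 3, 0]; when no cycle exists the
# function returns an empty list.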
| 711
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , A_ , A_=7 , A_=3 , A_=30 , A_=4_00 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=True , A_=1 / 2_55 , A_=True , ) -> List[Any]:
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowerCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = min_resolution
_lowerCamelCase = max_resolution
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = do_normalize
_lowerCamelCase = image_mean
_lowerCamelCase = image_std
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_pad
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self , A_ , A_=False ) -> List[str]:
"""simple docstring"""
if not batched:
_lowerCamelCase = image_inputs[0]
if isinstance(A_ , Image.Image ):
_lowerCamelCase , _lowerCamelCase = image.size
else:
_lowerCamelCase , _lowerCamelCase = image.shape[1], image.shape[2]
if w < h:
_lowerCamelCase = int(self.size['''shortest_edge'''] * h / w )
_lowerCamelCase = self.size['''shortest_edge''']
elif w > h:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = int(self.size['''shortest_edge'''] * w / h )
else:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = self.size['''shortest_edge''']
else:
_lowerCamelCase = []
for image in image_inputs:
_lowerCamelCase , _lowerCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCamelCase = max(A_ , key=lambda A_ : item[0] )[0]
_lowerCamelCase = max(A_ , key=lambda A_ : item[1] )[1]
return expected_height, expected_width
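# For example (assuming PIL's (width, height) ordering from image.size above), a
# 30 x 400 image with shortest_edge=18 resizes to height int(18 * 400 / 30) = 240
# and width 18, preserving the aspect ratio.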
@require_torch
@require_vision
class UpperCamelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = YolosImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , '''image_mean''' ) )
self.assertTrue(hasattr(A_ , '''image_std''' ) )
self.assertTrue(hasattr(A_ , '''do_normalize''' ) )
self.assertTrue(hasattr(A_ , '''do_resize''' ) )
self.assertTrue(hasattr(A_ , '''size''' ) )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , A_ )
_lowerCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , A_ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# Initialize image_processings
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
_lowerCamelCase = self.image_processing_class(do_resize=A_ , do_normalize=A_ , do_rescale=A_ )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
_lowerCamelCase = image_processing_a.pad(A_ , return_tensors='''pt''' )
_lowerCamelCase = image_processing_a(A_ , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
# prepare image and target
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_lowerCamelCase = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
# prepare image, target and masks_path
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_lowerCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_lowerCamelCase = YolosImageProcessor(format='''coco_panoptic''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify masks
_lowerCamelCase = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A_ )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
| 638
| 0
|
import requests
from bs4 import BeautifulSoup
def __magic_name__( __UpperCAmelCase = "https://www.worldometers.info/coronavirus" ) -> dict:
'''simple docstring'''
_lowerCamelCase = BeautifulSoup(requests.get(__UpperCAmelCase ).text , '''html.parser''' )
_lowerCamelCase = soup.findAll('''h1''' )
_lowerCamelCase = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
return {key.text.strip(): value.text.strip() for key, value in zip(__UpperCAmelCase , __UpperCAmelCase )}
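# The returned dict maps the scraped headings to their counters, e.g. keys such as
# 'Coronavirus Cases:' and 'Deaths:'; exact keys and values depend on the live page.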
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 712
|
import argparse
import json
from tqdm import tqdm
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=__UpperCAmelCase , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=__UpperCAmelCase , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=__UpperCAmelCase , help='''where to store parsed gold_data_path file''' , )
_lowerCamelCase = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
_lowerCamelCase = json.load(__UpperCAmelCase )
for dpr_record in tqdm(__UpperCAmelCase ):
_lowerCamelCase = dpr_record['''question''']
_lowerCamelCase = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(__UpperCAmelCase ) + '''\n''' )
if __name__ == "__main__":
main()
| 638
| 0
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
snake_case__ = True
except ImportError:
snake_case__ = False
snake_case__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def __magic_name__( __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( A_ ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' , type=A_ , help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' , type=A_ , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=A_ )
def __init__( self , A_ , A_ , A_=None , *A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = testing
_lowerCamelCase = testing_file
_lowerCamelCase = path
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
_lowerCamelCase = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(A_ ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
_lowerCamelCase = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
_lowerCamelCase = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A_ ) )
else:
with open(self._testing_file , '''r''' ) as configuration_file:
_lowerCamelCase = json.load(A_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=A_ , extra_context=A_ , )
_lowerCamelCase = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
_lowerCamelCase = json.load(A_ )
_lowerCamelCase = configuration['''lowercase_modelname''']
_lowerCamelCase = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(F'{directory}/configuration.json' )
_lowerCamelCase = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
_lowerCamelCase = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
_lowerCamelCase = '''Flax''' in generate_tensorflow_pytorch_and_flax
_lowerCamelCase = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(A_ , exist_ok=A_ )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=A_ )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , '''w''' ):
pass
shutil.move(
F'{directory}/__init__.py' , F'{model_dir}/__init__.py' , )
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' , F'{model_dir}/configuration_{lowercase_model_name}.py' , )
def remove_copy_lines(A_ ):
with open(A_ , '''r''' ) as f:
_lowerCamelCase = f.readlines()
with open(A_ , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(A_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' , F'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' , F'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' , F'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' , F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(A_ , A_ , A_ ):
# Create temp file
_lowerCamelCase , _lowerCamelCase = mkstemp()
_lowerCamelCase = False
with fdopen(A_ , '''w''' ) as new_file:
with open(A_ ) as old_file:
for line in old_file:
new_file.write(A_ )
if line_to_copy_below in line:
_lowerCamelCase = True
for line_to_copy in lines_to_copy:
new_file.write(A_ )
if not line_found:
raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(A_ , A_ )
# Remove original file
remove(A_ )
# Move new file
move(A_ , A_ )
def skip_units(A_ ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(A_ ):
with open(A_ ) as datafile:
_lowerCamelCase = []
_lowerCamelCase = False
_lowerCamelCase = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
_lowerCamelCase = line.split('''"''' )[1]
_lowerCamelCase = skip_units(A_ )
elif "# Below: " in line and "##" not in line:
_lowerCamelCase = line.split('''"''' )[1]
_lowerCamelCase = skip_units(A_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(A_ , A_ , A_ )
_lowerCamelCase = []
elif "# Replace with" in line and "##" not in line:
_lowerCamelCase = []
elif "##" not in line:
lines_to_copy.append(A_ )
remove(A_ )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(A_ )
| 713
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ = "cpu" , A_ = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
_lowerCamelCase = device
_lowerCamelCase = CLIPTokenizerFast.from_pretrained(A_ )
_lowerCamelCase = [0.48145466, 0.4578275, 0.40821073]
_lowerCamelCase = [0.26862954, 0.26130258, 0.27577711]
_lowerCamelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_lowerCamelCase = torchvision.transforms.Resize(2_24 )
_lowerCamelCase = torchvision.transforms.CenterCrop(2_24 )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.resize(A_ )
_lowerCamelCase = self.center_crop(A_ )
_lowerCamelCase = self.normalize(A_ )
return images
def __call__( self , A_=None , A_=None , **A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.tokenizer(text=A_ , **A_ )
_lowerCamelCase = self.preprocess_img(A_ )
_lowerCamelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , A_=10 , A_=0.01 , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=False , A_=True , A_="image" , A_=True , A_=False , A_=False , A_=False , ) -> None:
"""simple docstring"""
super().__init__()
_lowerCamelCase = None
_lowerCamelCase = device if device else get_device()
if vqgan:
_lowerCamelCase = vqgan
else:
_lowerCamelCase = load_vqgan(self.device , conf_path=A_ , ckpt_path=A_ )
self.vqgan.eval()
if clip:
_lowerCamelCase = clip
else:
_lowerCamelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
_lowerCamelCase = ProcessorGradientFlow(device=self.device )
_lowerCamelCase = iterations
_lowerCamelCase = lr
_lowerCamelCase = log
_lowerCamelCase = make_grid
_lowerCamelCase = return_val
_lowerCamelCase = quantize
_lowerCamelCase = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self , A_=None , A_=None , A_=5 , A_=True ) -> Any:
"""simple docstring"""
_lowerCamelCase = []
if output_path is None:
_lowerCamelCase = '''./animation.gif'''
if input_path is None:
_lowerCamelCase = self.save_path
_lowerCamelCase = sorted(glob(input_path + '''/*''' ) )
if not len(A_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(A_ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
_lowerCamelCase = total_duration / len(A_ )
_lowerCamelCase = [frame_duration] * len(A_ )
if extend_frames:
_lowerCamelCase = 1.5
_lowerCamelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(A_ ) )
imageio.mimsave(A_ , A_ , duration=A_ )
print(F'gif saved to {output_path}' )
def UpperCamelCase_ ( self , A_=None , A_=None ) -> Union[str, Any]:
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
_lowerCamelCase = preprocess(Image.open(A_ ) , target_image_size=2_56 ).to(self.device )
_lowerCamelCase = preprocess_vqgan(A_ )
_lowerCamelCase , *_lowerCamelCase = self.vqgan.encode(A_ )
return z
def UpperCamelCase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.latent.detach().requires_grad_()
_lowerCamelCase = base_latent + transform_vector
if self.quantize:
_lowerCamelCase , *_lowerCamelCase = self.vqgan.quantize(A_ )
else:
_lowerCamelCase = trans_latent
return self.vqgan.decode(A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_=None ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.clip_preprocessor(text=A_ , images=A_ , return_tensors='''pt''' , padding=A_ )
_lowerCamelCase = self.clip(**A_ )
_lowerCamelCase = clip_outputs.logits_per_image
if weights is not None:
_lowerCamelCase = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self._get_clip_similarity(pos_prompts['''prompts'''] , A_ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
_lowerCamelCase = self._get_clip_similarity(neg_prompts['''prompts'''] , A_ , weights=neg_prompts['''weights'''] )
else:
_lowerCamelCase = torch.tensor([1] , device=self.device )
_lowerCamelCase = -torch.log(A_ ) + torch.log(A_ )
return loss
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = torch.randn_like(self.latent , requires_grad=A_ , device=self.device )
_lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_lowerCamelCase = self._add_vector(A_ )
_lowerCamelCase = loop_post_process(A_ )
_lowerCamelCase = self._get_CLIP_loss(A_ , A_ , A_ )
print('''CLIP loss''' , A_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=A_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
wandb.init(reinit=A_ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
_lowerCamelCase = Image.open(A_ )
_lowerCamelCase = image.resize((2_56, 2_56) )
wandb.log('''Original Image''' , wandb.Image(A_ ) )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
if not prompts:
return []
_lowerCamelCase = []
_lowerCamelCase = []
if isinstance(A_ , A_ ):
_lowerCamelCase = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(A_ , (tuple, list) ):
_lowerCamelCase = prompt[0]
_lowerCamelCase = float(prompt[1] )
elif ":" in prompt:
_lowerCamelCase , _lowerCamelCase = prompt.split(''':''' )
_lowerCamelCase = float(A_ )
else:
_lowerCamelCase = prompt
_lowerCamelCase = 1.0
processed_prompts.append(A_ )
weights.append(A_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A_ , device=self.device ),
}
def UpperCamelCase_ ( self , A_ , A_=None , A_=None , A_=True , A_=False , A_=True , A_=True , A_=None , ) -> str:
"""simple docstring"""
if image_path:
_lowerCamelCase = self._get_latent(A_ )
else:
_lowerCamelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(A_ , A_ , A_ )
assert pos_prompts, "You must provide at least one positive prompt."
_lowerCamelCase = self.process_prompts(A_ )
_lowerCamelCase = self.process_prompts(A_ )
if save_final and save_path is None:
_lowerCamelCase = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(A_ ):
os.makedirs(A_ )
else:
_lowerCamelCase = save_path + '''_''' + get_timestamp()
os.makedirs(A_ )
_lowerCamelCase = save_path
_lowerCamelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(A_ ) )
_lowerCamelCase = loop_post_process(A_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(A_ , A_ , A_ ) ):
if show_intermediate:
show_pil(A_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(A_ )} )
if show_final:
show_pil(A_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
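# A minimal usage sketch for the editor above. Everything here is an
# assumption read off the obfuscated code: the class (called FaceEditor below,
# a hypothetical name) holds a VQGAN, a CLIP model and a latent, and the
# generator method defined last yields one result per optimization step,
# either a PIL image or a latent vector depending on `return_val`:
#
#   editor = FaceEditor(...)
#   for out in editor.generate(pos_prompts="smiling:1.0|blue eyes:0.5",
#                              neg_prompts="blurry:0.5", image_path="face.png"):
#       ...  # show or save each intermediate result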
from typing import Any
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = data
_lowerCamelCase = None
def __repr__( self ) -> str:
"""simple docstring"""
return F'Node({self.data})'
class UpperCamelCase :
'''simple docstring'''
def __init__( self ) -> int:
"""simple docstring"""
_lowerCamelCase = None
def __iter__( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.head
while node:
yield node.data
_lowerCamelCase = node.next
def __len__( self ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ) -> str:
"""simple docstring"""
return "->".join([str(A_ ) for item in self] )
def __getitem__( self , A_ ) -> Any:
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError('''list index out of range.''' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , A_ , A_ ) -> None:
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError('''list index out of range.''' )
_lowerCamelCase = self.head
for _ in range(A_ ):
_lowerCamelCase = current.next
_lowerCamelCase = data
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
self.insert_nth(len(self ) , A_ )
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
self.insert_nth(0 , A_ )
def UpperCamelCase_ ( self , A_ , A_ ) -> None:
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError('''list index out of range''' )
_lowerCamelCase = Node(A_ )
if self.head is None:
_lowerCamelCase = new_node
elif index == 0:
_lowerCamelCase = self.head # link new_node to head
_lowerCamelCase = new_node
else:
_lowerCamelCase = self.head
for _ in range(index - 1 ):
_lowerCamelCase = temp.next
_lowerCamelCase = temp.next
_lowerCamelCase = new_node
def UpperCamelCase_ ( self ) -> None: # print every node data
"""simple docstring"""
print(self )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
return self.delete_nth(0 )
def UpperCamelCase_ ( self ) -> Any: # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase_ ( self , A_ = 0 ) -> Any:
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('''List index out of range.''' )
_lowerCamelCase = self.head # default first node
if index == 0:
_lowerCamelCase = self.head.next
else:
_lowerCamelCase = self.head
for _ in range(index - 1 ):
_lowerCamelCase = temp.next
_lowerCamelCase = temp.next
_lowerCamelCase = temp.next.next
return delete_node.data
def UpperCamelCase_ ( self ) -> bool:
"""simple docstring"""
return self.head is None
def UpperCamelCase_ ( self ) -> None:
"""simple docstring"""
_lowerCamelCase = None
_lowerCamelCase = self.head
while current:
# Store the current node's next node.
_lowerCamelCase = current.next
# Make the current node's next point backwards
_lowerCamelCase = prev
# Make the previous node be the current node
_lowerCamelCase = current
# Make the current node the next node (to progress iteration)
_lowerCamelCase = next_node
# Return prev in order to put the head at the end
_lowerCamelCase = prev
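# Complexity note: with only a head pointer, insert_head/delete_head are O(1),
# while insert_tail, delete_tail and __len__ each traverse the list in O(n),
# so the length-checking loops in the tests below are quadratic overall.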
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = LinkedList()
assert linked_list.is_empty() is True
assert str(__UpperCAmelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__UpperCAmelCase ) == i
linked_list.insert_nth(__UpperCAmelCase , i + 1 )
assert str(__UpperCAmelCase ) == "->".join(str(__UpperCAmelCase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__UpperCAmelCase ) == "->".join(str(__UpperCAmelCase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__UpperCAmelCase ) == 9
assert str(__UpperCAmelCase ) == "->".join(str(__UpperCAmelCase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
_lowerCamelCase = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(__UpperCAmelCase ) == "->".join(str(__UpperCAmelCase ) for i in range(-8 , 1 ) )
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = [
-9,
100,
Node(7734_5112 ),
'''dlrow olleH''',
7,
5555,
0,
-192.5_5555,
'''Hello, world!''',
77.9,
Node(10 ),
None,
None,
12.20,
]
_lowerCamelCase = LinkedList()
for i in test_input:
linked_list.insert_tail(__UpperCAmelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__UpperCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
_lowerCamelCase = linked_list.delete_head()
assert result == -9
assert (
str(__UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
_lowerCamelCase = linked_list.delete_tail()
assert result == 12.2
assert (
str(__UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
_lowerCamelCase = linked_list.delete_nth(10 )
assert result is None
assert (
str(__UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('''Hello again, world!''' ) )
assert (
str(__UpperCAmelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__UpperCAmelCase )
assert (
str(__UpperCAmelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__UpperCAmelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __magic_name__( ) -> Optional[Any]:
'''simple docstring'''
from doctest import testmod
testmod()
_lowerCamelCase = LinkedList()
linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() )
linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() )
print('''\nPrint list:''' )
linked_list.print_list()
linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() )
linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() )
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nDelete head''' )
linked_list.delete_head()
print('''Delete tail''' )
linked_list.delete_tail()
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nReverse linked list''' )
linked_list.reverse()
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nString representation of linked list:''' )
print(__UpperCAmelCase )
print('''\nReading/changing Node data using indexing:''' )
print(F'Element at Position 1: {linked_list[1]}' )
_lowerCamelCase = input('''Enter New Value: ''' ).strip()
print('''New list:''' )
print(__UpperCAmelCase )
print(F'length of linked_list is : {len(__UpperCAmelCase )}' )
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
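# What the lazy pattern above buys (assuming this is a model subpackage
# __init__.py, e.g. transformers/models/whisper/__init__.py): importing the
# package is cheap, and the torch/tf/flax backends are only imported when an
# attribute such as WhisperModel is first accessed through the _LazyModule proxy.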
'''simple docstring'''
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
_lowerCamelCase = [0] * len(__UpperCAmelCase )
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__UpperCAmelCase ) ):
if indegree[i] == 0:
queue.append(__UpperCAmelCase )
while queue:
_lowerCamelCase = queue.pop(0 )
cnt += 1
topo.append(__UpperCAmelCase )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(__UpperCAmelCase )
if cnt != len(__UpperCAmelCase ):
print('''Cycle exists''' )
else:
print(__UpperCAmelCase )
# Adjacency List of Graph
snake_case__ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
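# Kahn's algorithm: repeatedly pop vertices of indegree 0 and decrement their
# neighbours' indegrees. For the adjacency list above the resulting order is
# [0, 1, 2, 3, 4, 5]; if fewer than len(graph) vertices get processed, the
# graph must contain a cycle.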
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def __magic_name__( __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.mean(1 )
# Centralize the data of class i
_lowerCamelCase = data - column_reshape(__UpperCAmelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(__UpperCAmelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = features.mean(1 )
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.shape[1]
_lowerCamelCase = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
if features.any():
_lowerCamelCase = features.mean(1 )
# Center the dataset
_lowerCamelCase = features - np.reshape(__UpperCAmelCase , (data_mean.size, 1) )
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T ) / features.shape[1]
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(__UpperCAmelCase )
        # eigh returns eigenvalues in ascending order, so reverse the columns and keep only the first `dimensions` eigenvectors
_lowerCamelCase = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCamelCase = np.dot(filtered_eigenvectors.T , __UpperCAmelCase )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
_lowerCamelCase , _lowerCamelCase = eigh(
covariance_between_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , covariance_within_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , )
_lowerCamelCase = eigenvectors[:, ::-1][:, :dimensions]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = np.linalg.svd(__UpperCAmelCase )
_lowerCamelCase = svd_matrix[:, 0:dimensions]
_lowerCamelCase = np.dot(filtered_svd_matrix.T , __UpperCAmelCase )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
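# The eigh call above solves the generalized eigenproblem S_b v = lambda * S_w v
# (between-class scatter against within-class scatter); projecting onto the
# leading eigenvectors maximizes class separation relative to in-class spread.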
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCamelCase = np.array([0, 0, 0, 1, 1] )
_lowerCamelCase = 2
_lowerCamelCase = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = linear_discriminant_analysis(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if isinstance(__UpperCAmelCase , np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCamelCase = 2
_lowerCamelCase = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = principal_component_analysis(__UpperCAmelCase , __UpperCAmelCase )
if not np.allclose(__UpperCAmelCase , __UpperCAmelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
snake_case__ = logging.get_logger(__name__)
snake_case__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
snake_case__ = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
snake_case__ = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
snake_case__ = {f'''funnel-transformer/{name}''': 512 for name in _model_names}
snake_case__ = {f'''funnel-transformer/{name}''': {'do_lower_case': True} for name in _model_names}
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_INIT_CONFIGURATION
A_ = FunnelTokenizer
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = 2
def __init__( self , A_=None , A_=None , A_=True , A_="<unk>" , A_="<sep>" , A_="<pad>" , A_="<cls>" , A_="<mask>" , A_="<s>" , A_="</s>" , A_=True , A_=True , A_=None , A_="##" , **A_ , ) -> Tuple:
"""simple docstring"""
super().__init__(
A_ , tokenizer_file=A_ , do_lower_case=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , bos_token=A_ , eos_token=A_ , clean_text=A_ , tokenize_chinese_chars=A_ , strip_accents=A_ , wordpieces_prefix=A_ , **A_ , )
_lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , A_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , A_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , A_ ) != tokenize_chinese_chars
):
_lowerCamelCase = getattr(A_ , normalizer_state.pop('''type''' ) )
_lowerCamelCase = do_lower_case
_lowerCamelCase = strip_accents
_lowerCamelCase = tokenize_chinese_chars
_lowerCamelCase = normalizer_class(**A_ )
_lowerCamelCase = do_lower_case
def UpperCamelCase_ ( self , A_ , A_=None ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
_lowerCamelCase = self._tokenizer.model.save(A_ , name=A_ )
return tuple(A_ )
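# Token-type sketch (Funnel uses a dedicated type id, 2, for [CLS]): a single
# sequence "A B" yields type ids [2, 0, 0, 0] for [CLS] A B [SEP]; with a
# second segment, its tokens and trailing [SEP] get type id 1, per the
# create-token-type-ids method above.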
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = ['vqvae']
def __init__( self , A_ , A_ , A_ , A_ , ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ , mel=A_ , vqvae=A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , A_ ) else 10_00
@torch.no_grad()
def __call__( self , A_ = 1 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = 0 , A_ = None , A_ = None , A_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
_lowerCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=A_ , device=self.device , )
_lowerCamelCase = noise
_lowerCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(A_ , A_ )
_lowerCamelCase = self.mel.audio_slice_to_image(A_ )
_lowerCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
_lowerCamelCase = (input_image / 2_55) * 2 - 1
_lowerCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCamelCase = self.vqvae.encode(torch.unsqueeze(A_ , 0 ) ).latent_dist.sample(
generator=A_ )[0]
_lowerCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , self.scheduler.timesteps[start_step - 1] )
_lowerCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCamelCase = int(mask_start_secs * pixels_per_second )
_lowerCamelCase = int(mask_end_secs * pixels_per_second )
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , A_ ):
_lowerCamelCase = self.unet(A_ , A_ , A_ )['''sample''']
else:
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
if isinstance(self.scheduler , A_ ):
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , eta=A_ , generator=A_ , )['''prev_sample''']
else:
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , generator=A_ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_lowerCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCamelCase = 1 / self.vqvae.config.scaling_factor * images
_lowerCamelCase = self.vqvae.decode(A_ )['''sample''']
_lowerCamelCase = (images / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_lowerCamelCase = (images * 2_55).round().astype('''uint8''' )
_lowerCamelCase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(A_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
_lowerCamelCase = [self.mel.image_to_audio(A_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(A_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(A_ ) )
@torch.no_grad()
def UpperCamelCase_ ( self , A_ , A_ = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , A_ )
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCamelCase = (sample / 2_55) * 2 - 1
_lowerCamelCase = torch.Tensor(A_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_lowerCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCamelCase = self.scheduler.alphas_cumprod[t]
_lowerCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCamelCase = 1 - alpha_prod_t
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
_lowerCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCamelCase_ ( A_ , A_ , A_ ) -> torch.Tensor:
"""simple docstring"""
_lowerCamelCase = acos(torch.dot(torch.flatten(A_ ) , torch.flatten(A_ ) ) / torch.norm(A_ ) / torch.norm(A_ ) )
return sin((1 - alpha) * theta ) * xa / sin(A_ ) + sin(alpha * theta ) * xa / sin(A_ )
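# The static method above is spherical interpolation (slerp) between two
# noise tensors:
#   slerp(alpha, x0, x1) = sin((1 - alpha) * theta) / sin(theta) * x0
#                        + sin(alpha * theta) / sin(theta) * x1
# where theta is the angle between the flattened tensors.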
def __magic_name__( __UpperCAmelCase ) -> int: # noqa: E741
'''simple docstring'''
_lowerCamelCase = len(__UpperCAmelCase )
_lowerCamelCase = 0
_lowerCamelCase = [0] * n
_lowerCamelCase = [False] * n
_lowerCamelCase = [False] * n
def dfs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if parent == root:
out_edge_count += 1
_lowerCamelCase = True
_lowerCamelCase = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_lowerCamelCase = dfs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
_lowerCamelCase = True
# AP found via cycle
if at == low[to]:
_lowerCamelCase = True
else:
_lowerCamelCase = min(low[at] , __UpperCAmelCase )
return out_edge_count
for i in range(__UpperCAmelCase ):
if not visited[i]:
_lowerCamelCase = 0
_lowerCamelCase = dfs(__UpperCAmelCase , __UpperCAmelCase , -1 , __UpperCAmelCase )
_lowerCamelCase = out_edge_count > 1
for x in range(len(__UpperCAmelCase ) ):
if is_art[x] is True:
print(__UpperCAmelCase )
# Adjacency list of graph
snake_case__ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
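# For the sample graph above, the articulation points are 2, 3 and 5:
# removing 2 separates the 0-1 triangle from the rest, removing 3 isolates
# vertex 4, and removing 5 cuts off the 6-7-8 cycle.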
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__lowercase ) , 'Tatoeba directory does not exist.' )
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A_ )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=A_ )
assert mmeta["long_pair"] == "heb-eng"
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 'mobilenet_v2'
def __init__( self , A_=3 , A_=2_24 , A_=1.0 , A_=8 , A_=8 , A_=6 , A_=32 , A_=True , A_=True , A_="relu6" , A_=True , A_=0.8 , A_=0.02 , A_=0.001 , A_=2_55 , **A_ , ) -> str:
"""simple docstring"""
super().__init__(**A_ )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
_lowerCamelCase = num_channels
_lowerCamelCase = image_size
_lowerCamelCase = depth_multiplier
_lowerCamelCase = depth_divisible_by
_lowerCamelCase = min_depth
_lowerCamelCase = expand_ratio
_lowerCamelCase = output_stride
_lowerCamelCase = first_layer_is_expansion
_lowerCamelCase = finegrained_output
_lowerCamelCase = hidden_act
_lowerCamelCase = tf_padding
_lowerCamelCase = classifier_dropout_prob
_lowerCamelCase = initializer_range
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = semantic_loss_ignore_index
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = version.parse('1.11' )
@property
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def UpperCamelCase_ ( self ) -> float:
"""simple docstring"""
return 1E-4
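# depth_multiplier scales the channel count of every layer, which is what the
# checkpoint names above encode: 0.35 and 0.75 shrink the network, 1.0 is the
# reference width, and 1.4 widens it.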
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=False ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase = ''''''
else:
_lowerCamelCase = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_lowerCamelCase = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase = in_proj_bias[: config.hidden_size]
_lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase = in_proj_bias[-config.hidden_size :]
def __magic_name__( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = dct.pop(__UpperCAmelCase )
_lowerCamelCase = val
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True ) -> str:
'''simple docstring'''
_lowerCamelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
_lowerCamelCase = 8
# set labels if required
if not base_model:
_lowerCamelCase = 1000
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_lowerCamelCase = 384
_lowerCamelCase = 1536
_lowerCamelCase = 12
_lowerCamelCase = 6
# load original model from torch hub
_lowerCamelCase = torch.hub.load('''facebookresearch/dino:main''' , __UpperCAmelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase = original_model.state_dict()
if base_model:
remove_classification_head_(__UpperCAmelCase )
_lowerCamelCase = create_rename_keys(__UpperCAmelCase , base_model=__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# load HuggingFace model
if base_model:
_lowerCamelCase = ViTModel(__UpperCAmelCase , add_pooling_layer=__UpperCAmelCase ).eval()
else:
_lowerCamelCase = ViTForImageClassification(__UpperCAmelCase ).eval()
model.load_state_dict(__UpperCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor
_lowerCamelCase = ViTImageProcessor()
_lowerCamelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
_lowerCamelCase = encoding['''pixel_values''']
_lowerCamelCase = model(__UpperCAmelCase )
if base_model:
_lowerCamelCase = original_model(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
_lowerCamelCase = original_model(__UpperCAmelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCAmelCase , outputs.logits , atol=1E-3 )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
snake_case__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
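# Example invocation (script filename is a placeholder; --base_model already
# defaults to True via parser.set_defaults above, and a trailing "8" in the
# model name selects patch size 8):
#   python convert_dino_checkpoint.py --model_name dino_vits8 \
#       --pytorch_dump_folder_path ./dino_vits8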
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , *A_ , **A_ ) -> None:
"""simple docstring"""
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' , A_ , )
super().__init__(*A_ , **A_ )
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
snake_case__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_lowerCamelCase = '''lm_head'''
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
_lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = fairseq_model.state_dict()
_lowerCamelCase = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(__UpperCAmelCase )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , __UpperCAmelCase )
if "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase = '''weight'''
else:
_lowerCamelCase = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(F'Unused weights: {unused_weights}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase = UniSpeechConfig.from_pretrained(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechConfig()
if is_finetuned:
if dict_path:
_lowerCamelCase = Dictionary.load_from_json(__UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase = target_dict.pad_index
_lowerCamelCase = target_dict.bos_index
_lowerCamelCase = target_dict.eos_index
_lowerCamelCase = len(target_dict.symbols )
_lowerCamelCase = os.path.join(__UpperCAmelCase , '''vocab.json''' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
_lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase = 42
_lowerCamelCase = 43
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = WavaVecaPhonemeCTCTokenizer(
__UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCAmelCase , )
_lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
_lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
_lowerCamelCase = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
_lowerCamelCase = UniSpeechForCTC(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechForPreTraining(__UpperCAmelCase )
if is_finetuned:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowerCamelCase = model[0].eval()
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
hf_unispeech.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
snake_case__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
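# Example invocation (script filename and all paths are placeholders):
#   python convert_unispeech_checkpoint.py --checkpoint_path ./unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-hf --config_path ./config.json \
#       --dict_path ./dict.ltr.txt
# Pass --not_finetuned to convert a pretraining-only checkpoint without a CTC head.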
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , A_ = None , A_ = None , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , **A_ , ) -> str:
"""simple docstring"""
_lowerCamelCase = path_or_paths
_lowerCamelCase = split if split or isinstance(A_ , A_ ) else '''train'''
_lowerCamelCase = features
_lowerCamelCase = cache_dir
_lowerCamelCase = keep_in_memory
_lowerCamelCase = streaming
_lowerCamelCase = num_proc
_lowerCamelCase = kwargs
@abstractmethod
def UpperCamelCase_ ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
"""simple docstring"""
pass
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , **A_ , ) -> Any:
"""simple docstring"""
_lowerCamelCase = features
_lowerCamelCase = cache_dir
_lowerCamelCase = keep_in_memory
_lowerCamelCase = streaming
_lowerCamelCase = num_proc
_lowerCamelCase = kwargs
@abstractmethod
def UpperCamelCase_ ( self ) -> Union[Dataset, IterableDataset]:
"""simple docstring"""
pass
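# Subclasses implement the abstract read method to return a Dataset or
# DatasetDict, or their iterable variants when streaming=True, per the
# return annotations above.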
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , *A_ , **A_ ) -> None:
"""simple docstring"""
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DPTImageProcessor instead.''' , A_ , )
super().__init__(*A_ , **A_ )
snake_case__ = range(2, 20 + 1)
snake_case__ = [10**k for k in range(ks[-1] + 1)]
snake_case__ = {}
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
_lowerCamelCase = sum(a_i[j] for j in range(__UpperCAmelCase , len(__UpperCAmelCase ) ) )
_lowerCamelCase = sum(a_i[j] * base[j] for j in range(min(len(__UpperCAmelCase ) , __UpperCAmelCase ) ) )
_lowerCamelCase , _lowerCamelCase = 0, 0
_lowerCamelCase = n - i
_lowerCamelCase = memo.get(__UpperCAmelCase )
if sub_memo is not None:
_lowerCamelCase = sub_memo.get(__UpperCAmelCase )
if jumps is not None and len(__UpperCAmelCase ) > 0:
# find and make the largest jump without going over
_lowerCamelCase = -1
for _k in range(len(__UpperCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_lowerCamelCase = _k
break
if max_jump >= 0:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = jumps[max_jump]
# since the difference between jumps is cached, add c
_lowerCamelCase = diff + c
for j in range(min(__UpperCAmelCase , len(__UpperCAmelCase ) ) ):
_lowerCamelCase , _lowerCamelCase = divmod(__UpperCAmelCase , 10 )
if new_c > 0:
add(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
_lowerCamelCase = []
else:
_lowerCamelCase = {c: []}
_lowerCamelCase = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_lowerCamelCase , _lowerCamelCase = next_term(__UpperCAmelCase , k - 1 , i + dn , __UpperCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_lowerCamelCase , _lowerCamelCase = compute(__UpperCAmelCase , __UpperCAmelCase , i + dn , __UpperCAmelCase )
diff += _diff
dn += terms_jumped
_lowerCamelCase = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCamelCase = 0
while j < len(__UpperCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__UpperCAmelCase , (diff, dn, k) )
return (diff, dn)
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if i >= n:
return 0, i
if k > len(__UpperCAmelCase ):
a_i.extend([0 for _ in range(k - len(__UpperCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_lowerCamelCase = i
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0, 0, 0
for j in range(len(__UpperCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_lowerCamelCase = ds_c + ds_b
diff += addend
_lowerCamelCase = 0
for j in range(__UpperCAmelCase ):
_lowerCamelCase = a_i[j] + addend
_lowerCamelCase , _lowerCamelCase = divmod(__UpperCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return diff, i - start_i
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
for j in range(__UpperCAmelCase , len(__UpperCAmelCase ) ):
_lowerCamelCase = digits[j] + addend
if s >= 10:
_lowerCamelCase , _lowerCamelCase = divmod(__UpperCAmelCase , 10 )
_lowerCamelCase = addend // 10 + quotient
else:
_lowerCamelCase = s
_lowerCamelCase = addend // 10
if addend == 0:
break
while addend > 0:
_lowerCamelCase , _lowerCamelCase = divmod(__UpperCAmelCase , 10 )
digits.append(__UpperCAmelCase )
def __magic_name__( __UpperCAmelCase = 10**15 ) -> int:
'''simple docstring'''
_lowerCamelCase = [1]
_lowerCamelCase = 1
_lowerCamelCase = 0
while True:
_lowerCamelCase , _lowerCamelCase = next_term(__UpperCAmelCase , 20 , i + dn , __UpperCAmelCase )
dn += terms_jumped
if dn == n - i:
break
_lowerCamelCase = 0
for j in range(len(__UpperCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
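# The sequence computed above is a(1) = 1, a(n) = a(n-1) + digitsum(a(n-1)),
# and solution() returns a(10**15); this appears to be Project Euler 551.
# The memo caches "jumps": for a fixed low-order suffix c = a_i mod 10**k it
# stores the total increment and the number of terms consumed before the
# suffix rolls over, so those terms are skipped instead of recomputed.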
import argparse
import json
import subprocess
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
_lowerCamelCase = subprocess.run(__UpperCAmelCase , shell=__UpperCAmelCase , stdout=subprocess.PIPE )
_lowerCamelCase = output.stdout.decode('''utf-8''' )
_lowerCamelCase = json.loads(__UpperCAmelCase )
_lowerCamelCase = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(__UpperCAmelCase )
# save the result so we can report them on Slack
with open('''offline_runners.txt''' , '''w''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > 0:
_lowerCamelCase = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
return values.split(''',''' )
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
snake_case__ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
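# Example invocation (the token needs actions:read permission, as the help
# text says); offline runners are also written to offline_runners.txt so they
# can be reported on Slack:
#   python check_offline_runners.py --target_runners runner-1,runner-2 --token $GH_TOKEN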
import requests
from bs4 import BeautifulSoup
def A ( _UpperCAmelCase : str = "AAPL" ) -> str:
'''simple docstring'''
_UpperCAmelCase = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
_UpperCAmelCase = BeautifulSoup(requests.get(_UpperCAmelCase ).text , 'html.parser' )
_UpperCAmelCase = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 639
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Tuple="pt" ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = {'add_prefix_space': True} if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not line.startswith(' ' ) else {}
_UpperCAmelCase = padding_side
return tokenizer(
[line] , max_length=_UpperCAmelCase , padding='max_length' if pad_to_max_length else None , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=None , ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = input_ids.ne(_UpperCAmelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
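# A minimal sketch of the column-trimming helper above (the name trim_batch is
# inferred from its call sites below): columns in which every row equals the
# pad id are dropped. For example, with pad_token_id = 0:
#   input_ids = torch.tensor([[5, 6, 0, 0],
#                             [7, 0, 0, 0]])
#   trim_batch(input_ids, 0)  # -> tensor([[5, 6], [7, 0]])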
class __lowerCAmelCase ( A ):
def __init__( self : Union[str, Any] , A : Union[str, Any] , A : Optional[int] , A : str , A : Union[str, Any] , A : int="train" , A : List[Any]=None , A : int=None , A : Tuple=None , A : str="" , ) -> List[Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = Path(A).joinpath(type_path + '.source')
_UpperCAmelCase = Path(A).joinpath(type_path + '.target')
_UpperCAmelCase = self.get_char_lens(self.src_file)
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self : Tuple) -> Optional[int]:
"""simple docstring"""
return len(self.src_lens)
def __getitem__( self : Any , A : Dict) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file) , A).rstrip('\n')
_UpperCAmelCase = linecache.getline(str(self.tgt_file) , A).rstrip('\n')
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , A):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , A) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , A) else self.tokenizer
_UpperCAmelCase = encode_line(A , A , self.max_source_length , 'right')
_UpperCAmelCase = encode_line(A , A , self.max_target_length , 'right')
_UpperCAmelCase = source_inputs['input_ids'].squeeze()
_UpperCAmelCase = target_inputs['input_ids'].squeeze()
_UpperCAmelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _lowerCamelCase ( A : str) -> Tuple:
"""simple docstring"""
return [len(A) for x in Path(A).open().readlines()]
def _lowerCamelCase ( self : int , A : int) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = torch.stack([x['input_ids'] for x in batch])
_UpperCAmelCase = torch.stack([x['attention_mask'] for x in batch])
_UpperCAmelCase = torch.stack([x['decoder_input_ids'] for x in batch])
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(A , A)
_UpperCAmelCase , _UpperCAmelCase = trim_batch(A , A , attention_mask=A)
_UpperCAmelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
UpperCAmelCase__ = getLogger(__name__)
def A ( _UpperCAmelCase : List[List] ) -> Union[str, Any]:
'''simple docstring'''
return list(itertools.chain.from_iterable(_UpperCAmelCase ) )
def A ( _UpperCAmelCase : str ) -> None:
'''simple docstring'''
_UpperCAmelCase = get_git_info()
save_json(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'git_log.json' ) )
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]=4 , **_UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
with open(_UpperCAmelCase , 'w' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase , indent=_UpperCAmelCase , **_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
with open(_UpperCAmelCase ) as f:
return json.load(_UpperCAmelCase )
def A ( ) -> str:
'''simple docstring'''
_UpperCAmelCase = git.Repo(search_parent_directories=_UpperCAmelCase )
_UpperCAmelCase = {
'repo_id': str(_UpperCAmelCase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def A ( _UpperCAmelCase : Callable , _UpperCAmelCase : Iterable ) -> List:
'''simple docstring'''
return list(map(_UpperCAmelCase , _UpperCAmelCase ) )
def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'wb' ) as f:
return pickle.dump(_UpperCAmelCase , _UpperCAmelCase )
def A ( _UpperCAmelCase : int ) -> str:
'''simple docstring'''
def remove_articles(_UpperCAmelCase : Optional[int] ):
return re.sub(R'\b(a|an|the)\b' , ' ' , _UpperCAmelCase )
def white_space_fix(_UpperCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(_UpperCAmelCase : Tuple ):
_UpperCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_UpperCAmelCase : str ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase ) ) ) )
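# Sketch of the normalization pipeline above (lowercase, strip punctuation,
# drop articles, collapse whitespace), e.g.:
#   normalize_answer("The Quick, Brown Fox!")  # -> "quick brown fox"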
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = normalize_answer(_UpperCAmelCase ).split()
_UpperCAmelCase = normalize_answer(_UpperCAmelCase ).split()
_UpperCAmelCase = Counter(_UpperCAmelCase ) & Counter(_UpperCAmelCase )
_UpperCAmelCase = sum(common.values() )
if num_same == 0:
return 0
_UpperCAmelCase = 1.0 * num_same / len(_UpperCAmelCase )
_UpperCAmelCase = 1.0 * num_same / len(_UpperCAmelCase )
_UpperCAmelCase = (2 * precision * recall) / (precision + recall)
return fa
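# Worked example of the token-level F1 above (the function name is assumed from
# its call sites): "green apples" vs. "red apples" share one token, so
# precision and recall are both 1/2 and F1 = 0.5.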
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
return normalize_answer(_UpperCAmelCase ) == normalize_answer(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ) -> Dict:
'''simple docstring'''
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = 0
for hypo, pred in zip(_UpperCAmelCase , _UpperCAmelCase ):
em += exact_match_score(_UpperCAmelCase , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
em /= len(_UpperCAmelCase )
return {"em": em}
def A ( _UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
return model_prefix.startswith('rag' )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_UpperCAmelCase = 'dropout_rate'
for p in extra_params:
if getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if not hasattr(_UpperCAmelCase , _UpperCAmelCase ) and not hasattr(_UpperCAmelCase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_UpperCAmelCase ) )
delattr(_UpperCAmelCase , _UpperCAmelCase )
continue
_UpperCAmelCase = p if hasattr(_UpperCAmelCase , _UpperCAmelCase ) else equivalent_param[p]
setattr(_UpperCAmelCase , _UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
delattr(_UpperCAmelCase , _UpperCAmelCase )
return hparams, config
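# In short: each extra hyperparameter is copied onto the model config, falling
# back to the T5-style equivalent name ("dropout" -> "dropout_rate") when the
# config lacks the direct attribute, and is then removed from hparams.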
| 639
| 1
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = XLMRobertaTokenizer
UpperCamelCase = XLMRobertaTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = XLMRobertaTokenizer(A , keep_accents=A)
tokenizer.save_pretrained(self.tmpdirname)
def _lowerCamelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A) , A)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A) , A)
def _lowerCamelCase ( self : Optional[int]) -> str:
"""simple docstring"""
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , '<mask>')
self.assertEqual(len(A) , 10_02)
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_02)
def _lowerCamelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = XLMRobertaTokenizer(A , keep_accents=A)
_UpperCAmelCase = tokenizer.tokenize('This is a test')
self.assertListEqual(A , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertListEqual(
A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def _lowerCamelCase ( self : str) -> str:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_UpperCAmelCase = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A , **A)
_UpperCAmelCase = self.tokenizer_class.from_pretrained(A , **A)
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = tokenizer_r.save_pretrained(A)
_UpperCAmelCase = tokenizer_p.save_pretrained(A)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
_UpperCAmelCase = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
self.assertSequenceEqual(A , A)
# Checks everything loads correctly in the same way
_UpperCAmelCase = tokenizer_r.from_pretrained(A)
_UpperCAmelCase = tokenizer_p.from_pretrained(A)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A)
# Save tokenizer rust, legacy_format=True
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = tokenizer_r.save_pretrained(A , legacy_format=A)
_UpperCAmelCase = tokenizer_p.save_pretrained(A)
# Checks it save with the same files
self.assertSequenceEqual(A , A)
# Checks everything loads correctly in the same way
_UpperCAmelCase = tokenizer_r.from_pretrained(A)
_UpperCAmelCase = tokenizer_p.from_pretrained(A)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A))
shutil.rmtree(A)
# Save tokenizer rust, legacy_format=False
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = tokenizer_r.save_pretrained(A , legacy_format=A)
_UpperCAmelCase = tokenizer_p.save_pretrained(A)
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
_UpperCAmelCase = tokenizer_r.from_pretrained(A)
_UpperCAmelCase = tokenizer_p.from_pretrained(A)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A))
shutil.rmtree(A)
@cached_property
def _lowerCamelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(A , f.name)
_UpperCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=A)
_UpperCAmelCase = pickle.dumps(A)
pickle.loads(A)
def _lowerCamelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = 'I was born in 92000, and this is falsé.'
_UpperCAmelCase = tokenizer.tokenize(A)
_UpperCAmelCase = rust_tokenizer.tokenize(A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A)
_UpperCAmelCase = rust_tokenizer.encode(A , add_special_tokens=A)
self.assertListEqual(A , A)
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(A)
_UpperCAmelCase = rust_tokenizer.encode(A)
self.assertListEqual(A , A)
@slow
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = 'Hello World!'
_UpperCAmelCase = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(A , self.big_tokenizer.encode(A))
@slow
def _lowerCamelCase ( self : str) -> Dict:
"""simple docstring"""
_UpperCAmelCase = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
_UpperCAmelCase = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(A , self.big_tokenizer.encode(A))
@slow
def _lowerCamelCase ( self : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = {'input_ids': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
| 639
|
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
while second != 0:
_UpperCAmelCase = first & second
first ^= second
_UpperCAmelCase = c << 1
return first
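# Worked trace of the carry loop above for add(3, 5): XOR adds without carry
# while (first & second) << 1 re-injects the carry on each round:
#   3 ^ 5 = 6, carry 2;  6 ^ 2 = 4, carry 4;  4 ^ 4 = 0, carry 8;  result 8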
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("Enter the first number: ").strip())
UpperCAmelCase__ = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 639
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
UpperCAmelCase__ = random.Random()
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int=1.0 , _UpperCAmelCase : int=None , _UpperCAmelCase : Optional[int]=None ) -> Tuple:
'''simple docstring'''
if rng is None:
_UpperCAmelCase = global_rng
_UpperCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
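# Sketch: floats_list((2, 3)) returns a 2 x 3 nested list of floats drawn from
# [0, scale), using the module-level RNG unless `rng` is supplied.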
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self : int , A : Any , A : Optional[int]=7 , A : str=4_00 , A : int=20_00 , A : str=1 , A : Optional[int]=0.0 , A : List[Any]=1_60_00 , A : int=True , A : List[str]=80 , A : List[Any]=16 , A : Any=64 , A : Optional[Any]="hann_window" , A : Tuple=80 , A : Union[str, Any]=76_00 , A : Any=1E-10 , A : Optional[int]=True , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = min_seq_length
_UpperCAmelCase = max_seq_length
_UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase = feature_size
_UpperCAmelCase = padding_value
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = do_normalize
_UpperCAmelCase = num_mel_bins
_UpperCAmelCase = hop_length
_UpperCAmelCase = win_length
_UpperCAmelCase = win_function
_UpperCAmelCase = fmin
_UpperCAmelCase = fmax
_UpperCAmelCase = mel_floor
_UpperCAmelCase = return_attention_mask
def _lowerCamelCase ( self : Dict) -> int:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def _lowerCamelCase ( self : int , A : List[str]=False , A : Any=False) -> str:
"""simple docstring"""
def _flatten(A : List[str]):
return list(itertools.chain(*A))
if equal_length:
_UpperCAmelCase = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
_UpperCAmelCase = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
_UpperCAmelCase = [np.asarray(A) for x in speech_inputs]
return speech_inputs
def _lowerCamelCase ( self : str , A : Tuple=False , A : Any=False) -> Dict:
"""simple docstring"""
if equal_length:
_UpperCAmelCase = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
_UpperCAmelCase = [
floats_list((x, self.num_mel_bins))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
_UpperCAmelCase = [np.asarray(A) for x in speech_inputs]
return speech_inputs
@require_torch
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = SpeechTaFeatureExtractor
def _lowerCamelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = SpeechTaFeatureExtractionTester(self)
def _lowerCamelCase ( self : str , A : Optional[int]) -> Tuple:
"""simple docstring"""
self.assertTrue(np.all(np.mean(A , axis=0) < 1E-3))
self.assertTrue(np.all(np.abs(np.var(A , axis=0) - 1) < 1E-3))
def _lowerCamelCase ( self : List[Any]) -> str:
"""simple docstring"""
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
_UpperCAmelCase = [np.asarray(A) for speech_input in speech_inputs]
# Test not batched input
_UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='np').input_values
_UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='np').input_values
self.assertTrue(np.allclose(A , A , atol=1E-3))
# Test batched
_UpperCAmelCase = feat_extract(A , return_tensors='np').input_values
_UpperCAmelCase = feat_extract(A , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(A , A):
self.assertTrue(np.allclose(A , A , atol=1E-3))
def _lowerCamelCase ( self : List[str]) -> int:
"""simple docstring"""
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCAmelCase = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
_UpperCAmelCase = ['longest', 'max_length', 'do_not_pad']
_UpperCAmelCase = [None, 16_00, None]
for max_length, padding in zip(A , A):
_UpperCAmelCase = feat_extract(A , padding=A , max_length=A , return_tensors='np')
_UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self.assertTrue(input_values[0][8_00:].sum() < 1E-6)
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self.assertTrue(input_values[0][10_00:].sum() < 1E-6)
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCAmelCase = range(8_00 , 14_00 , 2_00)
_UpperCAmelCase = [floats_list((1, x))[0] for x in lengths]
_UpperCAmelCase = ['longest', 'max_length', 'do_not_pad']
_UpperCAmelCase = [None, 16_00, None]
for max_length, padding in zip(A , A):
_UpperCAmelCase = feat_extract(A , max_length=A , padding=A)
_UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCAmelCase = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
_UpperCAmelCase = feat_extract(
A , truncation=A , max_length=10_00 , padding='max_length' , return_tensors='np')
_UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1])
self._check_zero_mean_unit_variance(input_values[2])
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCAmelCase = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
_UpperCAmelCase = feat_extract(
A , truncation=A , max_length=10_00 , padding='longest' , return_tensors='np')
_UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00))
_UpperCAmelCase = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
_UpperCAmelCase = feat_extract(
A , truncation=A , max_length=20_00 , padding='longest' , return_tensors='np')
_UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00))
def _lowerCamelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCAmelCase = np.random.rand(1_00).astype(np.floataa)
_UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_values.dtype == np.floataa)
_UpperCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
def _lowerCamelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
_UpperCAmelCase = [np.asarray(A) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase = feature_extractor(audio_target=A , padding=A , return_tensors='np').input_values
self.assertTrue(input_values.ndim == 3)
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)
# Test not batched input
_UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors='np').input_values
_UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='np').input_values
self.assertTrue(np.allclose(A , A , atol=1E-3))
# Test batched
_UpperCAmelCase = feature_extractor(A , return_tensors='np').input_values
_UpperCAmelCase = feature_extractor(A , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(A , A):
self.assertTrue(np.allclose(A , A , atol=1E-3))
# Test 2-D numpy arrays are batched.
_UpperCAmelCase = [floats_list((1, x))[0] for x in (8_00, 8_00, 8_00)]
_UpperCAmelCase = np.asarray(A)
_UpperCAmelCase = feature_extractor(A , return_tensors='np').input_values
_UpperCAmelCase = feature_extractor(A , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(A , A):
self.assertTrue(np.allclose(A , A , atol=1E-3))
def _lowerCamelCase ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict)
_UpperCAmelCase = feat_extract.model_input_names[0]
_UpperCAmelCase = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(A) == len(A) for x, y in zip(A , processed_features[input_name])))
_UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=A)
_UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='np')
_UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape) < 3:
_UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=A)
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict)
_UpperCAmelCase = feat_extract.model_input_names[0]
_UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
_UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape) < 3:
_UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict)
_UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
_UpperCAmelCase = feat_extract.model_input_names[0]
_UpperCAmelCase = BatchFeature({input_name: speech_inputs})
_UpperCAmelCase = feat_extract.num_mel_bins # hack!
_UpperCAmelCase = feat_extract.pad(A , padding='longest' , return_tensors='np')[input_name]
_UpperCAmelCase = feat_extract.pad(A , padding='longest' , return_tensors='pt')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2)
def _lowerCamelCase ( self : Dict) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.feat_extract_dict
_UpperCAmelCase = True
_UpperCAmelCase = self.feature_extraction_class(**A)
_UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
_UpperCAmelCase = [len(A) for x in speech_inputs]
_UpperCAmelCase = feat_extract.model_input_names[0]
_UpperCAmelCase = BatchFeature({input_name: speech_inputs})
_UpperCAmelCase = feat_extract.num_mel_bins # hack!
_UpperCAmelCase = feat_extract.pad(A , padding='longest' , return_tensors='np')
self.assertIn('attention_mask' , A)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , A)
def _lowerCamelCase ( self : List[Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.feat_extract_dict
_UpperCAmelCase = True
_UpperCAmelCase = self.feature_extraction_class(**A)
_UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
_UpperCAmelCase = [len(A) for x in speech_inputs]
_UpperCAmelCase = feat_extract.model_input_names[0]
_UpperCAmelCase = BatchFeature({input_name: speech_inputs})
_UpperCAmelCase = min(A)
_UpperCAmelCase = feat_extract.num_mel_bins # hack!
_UpperCAmelCase = feat_extract.pad(
A , padding='max_length' , max_length=A , truncation=A , return_tensors='np')
self.assertIn('attention_mask' , A)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
def _lowerCamelCase ( self : List[Any] , A : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
from datasets import load_dataset
_UpperCAmelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation')
# automatic decoding with librispeech
_UpperCAmelCase = ds.sort('id').select(range(A))[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03])
_UpperCAmelCase = self._load_datasamples(1)
_UpperCAmelCase = SpeechTaFeatureExtractor()
_UpperCAmelCase = feature_extractor(A , return_tensors='pt').input_values
        self.assertEqual(input_values.shape , (1, 9_36_80))
self.assertTrue(torch.allclose(input_values[0, :30] , A , atol=1E-6))
def _lowerCamelCase ( self : Any) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8])
_UpperCAmelCase = self._load_datasamples(1)
_UpperCAmelCase = SpeechTaFeatureExtractor()
_UpperCAmelCase = feature_extractor(audio_target=A , return_tensors='pt').input_values
        self.assertEqual(input_values.shape , (1, 3_66, 80))
self.assertTrue(torch.allclose(input_values[0, 0, :30] , A , atol=1E-4))
| 639
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A ( _UpperCAmelCase : str , _UpperCAmelCase : complex , _UpperCAmelCase : str = "x" , _UpperCAmelCase : float = 10**-10 , _UpperCAmelCase : int = 1 , ) -> complex:
'''simple docstring'''
_UpperCAmelCase = symbols(_UpperCAmelCase )
_UpperCAmelCase = lambdify(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = lambdify(_UpperCAmelCase , diff(_UpperCAmelCase , _UpperCAmelCase ) )
_UpperCAmelCase = starting_point
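    # Multiplicity-aware Newton-Raphson update applied in the loop below
    # (stated for reference): x_{n+1} = x_n - multiplicity * f(x_n) / f'(x_n)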
while True:
if diff_function(_UpperCAmelCase ) != 0:
_UpperCAmelCase = prev_guess - multiplicity * func(_UpperCAmelCase ) / diff_function(
_UpperCAmelCase )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_UpperCAmelCase = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 639
| 1
|
def A ( _UpperCAmelCase : str ) -> list:
'''simple docstring'''
if n_term == "":
return []
_UpperCAmelCase = []
for temp in range(int(_UpperCAmelCase ) ):
series.append(F"1/{temp + 1}" if series else '1' )
return series
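# Sketch: harmonic_series("4") -> ['1', '1/2', '1/3', '1/4']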
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
| 639
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : Any , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_UpperCAmelCase = get_resize_output_image_size(A , size=size['shortest_edge'] , default_to_square=A)
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : int , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Union[str, Any] , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : int , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=A , size=A , resample=A) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=A , size=A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=A , mean=A , std=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
def _lowerCamelCase ( self : str , A : Any , A : List[Tuple] = None) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A) != len(A):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(A):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(A)):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(A)
else:
_UpperCAmelCase = logits.argmax(dim=1)
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
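# Hypothetical usage of the post-processing above (the method and the
# surrounding model/processor objects are assumptions; identifiers in this
# dump are obfuscated):
#   outputs = model(**inputs)
#   maps = image_processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[(image.height, image.width)])
#   # maps[0] is then an (H, W) tensor of per-pixel class indices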
| 639
| 1
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCAmelCase ( A , A , A ):
@register_to_config
def __init__( self : Optional[int] , A : int , A : int , A : int , A : float , A : int , A : int , A : int , A : int , A : str , A : bool = False , ) -> List[str]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Embedding(A , A)
_UpperCAmelCase = nn.Embedding(A , A)
_UpperCAmelCase = False
_UpperCAmelCase = nn.Dropout(p=A)
_UpperCAmelCase = TaConfig(
vocab_size=A , d_model=A , num_heads=A , d_kv=A , d_ff=A , dropout_rate=A , feed_forward_proj=A , is_decoder=A , is_encoder_decoder=A , )
_UpperCAmelCase = nn.ModuleList()
for lyr_num in range(A):
_UpperCAmelCase = TaBlock(A)
self.encoders.append(A)
_UpperCAmelCase = TaLayerNorm(A)
_UpperCAmelCase = nn.Dropout(p=A)
def _lowerCamelCase ( self : Dict , A : List[Any] , A : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.token_embedder(A)
_UpperCAmelCase = encoder_input_tokens.shape[1]
_UpperCAmelCase = torch.arange(A , device=encoder_input_tokens.device)
x += self.position_encoding(A)
_UpperCAmelCase = self.dropout_pre(A)
# inverted the attention mask
_UpperCAmelCase = encoder_input_tokens.size()
_UpperCAmelCase = self.get_extended_attention_mask(A , A)
for lyr in self.encoders:
_UpperCAmelCase = lyr(A , A)[0]
_UpperCAmelCase = self.layer_norm(A)
return self.dropout_post(A), encoder_inputs_mask
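# Summary of the forward pass above: token plus learned position embeddings,
# pre-dropout, a stack of T5 blocks driven by the extended attention mask, a
# final layer norm, then post-dropout; it returns
# (hidden_states, encoder_inputs_mask).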
| 639
|
import unittest
from knapsack import knapsack as k
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = [0]
_UpperCAmelCase = [0]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
_UpperCAmelCase = [60]
_UpperCAmelCase = [10]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = 3
_UpperCAmelCase = [1, 2, 3]
_UpperCAmelCase = [3, 2, 1]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 5)
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 50
_UpperCAmelCase = [60, 1_00, 1_20]
_UpperCAmelCase = [10, 20, 30]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 2_20)
if __name__ == "__main__":
unittest.main()
| 639
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase__ = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
|
import qiskit
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> qiskit.result.counts.Counts:
'''simple docstring'''
_UpperCAmelCase = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_UpperCAmelCase = qiskit.QuantumCircuit(_UpperCAmelCase , _UpperCAmelCase )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
_UpperCAmelCase = qiskit.execute(_UpperCAmelCase , _UpperCAmelCase , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_UpperCAmelCase )
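# Sketch of the expected outcome: with X gates applied to both qubits, every
# shot measures '11', so on the noiseless simulator the returned counts
# collapse to {'11': 1000}.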
if __name__ == "__main__":
UpperCAmelCase__ = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 639
| 1
|
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
UpperCAmelCase__ = logging.getLogger(__name__)
def A ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(
description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
parser.add_argument('--file_path' , type=_UpperCAmelCase , default='data/dump.txt' , help='The path to the data.' )
parser.add_argument('--tokenizer_type' , type=_UpperCAmelCase , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
parser.add_argument('--tokenizer_name' , type=_UpperCAmelCase , default='bert-base-uncased' , help='The tokenizer to use.' )
parser.add_argument('--dump_file' , type=_UpperCAmelCase , default='data/dump' , help='The dump file prefix.' )
_UpperCAmelCase = parser.parse_args()
logger.info(F"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
_UpperCAmelCase = BertTokenizer.from_pretrained(args.tokenizer_name )
_UpperCAmelCase = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
_UpperCAmelCase = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
_UpperCAmelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name )
_UpperCAmelCase = tokenizer.special_tokens_map['cls_token'] # `<s>`
_UpperCAmelCase = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
_UpperCAmelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name )
_UpperCAmelCase = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
_UpperCAmelCase = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(F"Loading text from {args.file_path}" )
with open(args.file_path , 'r' , encoding='utf8' ) as fp:
_UpperCAmelCase = fp.readlines()
logger.info('Start encoding' )
logger.info(F"{len(_UpperCAmelCase )} examples to process." )
_UpperCAmelCase = []
_UpperCAmelCase = 0
_UpperCAmelCase = 10_000
_UpperCAmelCase = time.time()
for text in data:
_UpperCAmelCase = F"{bos} {text.strip()} {sep}"
_UpperCAmelCase = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
rslt.append(_UpperCAmelCase )
iter += 1
if iter % interval == 0:
_UpperCAmelCase = time.time()
logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
_UpperCAmelCase = time.time()
logger.info('Finished binarization' )
logger.info(F"{len(_UpperCAmelCase )} examples processed." )
_UpperCAmelCase = F"{args.dump_file}.{args.tokenizer_name}.pickle"
_UpperCAmelCase = tokenizer.vocab_size
if vocab_size < (1 << 16):
_UpperCAmelCase = [np.uintaa(_UpperCAmelCase ) for d in rslt]
else:
_UpperCAmelCase = [np.intaa(_UpperCAmelCase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"Dump to {dp_file}" )
with open(_UpperCAmelCase , 'wb' ) as handle:
pickle.dump(rslt_ , _UpperCAmelCase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
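# Example invocation (a sketch; the script filename and paths are illustrative):
#   python binarize.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text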
| 639
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
# Initialise PyTorch model
_UpperCAmelCase = TaConfig.from_json_file(_UpperCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
_UpperCAmelCase = TaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
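# Example invocation (a sketch; the script filename and paths are illustrative):
#   python convert_t5_checkpoint.py --tf_checkpoint_path ./t5/model.ckpt \
#       --config_file ./t5/config.json --pytorch_dump_path ./t5-pytorch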
| 639
| 1
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
UpperCAmelCase__ = TypeVar("KT")
UpperCAmelCase__ = TypeVar("VT")
class __lowerCAmelCase ( Generic[KT, VT] ):
def __init__( self : Optional[int] , A : KT | str = "root" , A : VT | None = None) -> str:
"""simple docstring"""
_UpperCAmelCase = key
_UpperCAmelCase = value
_UpperCAmelCase = []
def __repr__( self : Optional[int]) -> str:
"""simple docstring"""
return F"Node({self.key}: {self.value})"
    @property
    def level(self) -> int:
        """Number of forward references, i.e. the node's level in the skip list."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16) -> None:
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__(self) -> str:
        """Visual representation of the skip list."""
        items = list(self)
        if len(items) == 0:
            return F"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(F"[{node.key}]".ljust(label_size, '-') + '* ' * len(forwards))
        lines.append(' ' * label_size + '| ' * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                F"[{node.key}]".ljust(label_size, '-')
                + ' '.join(str(n.key) if n.key == node.key else '|' for n in forwards))
            lines.append(' ' * label_size + '| ' * len(forwards))
            forwards = node.forward
        lines.append('None'.ljust(label_size) + '* ' * len(forwards))
        return F"SkipList(level={self.level})\n" + "\n".join(lines)
    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level(self) -> int:
        """Draw a geometrically distributed level: P(level >= k) = p**(k - 1)."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When the node level is less than `i`, decrement `i`.
            # node.forward[i].key < key - Jumping to a node with a key greater than
            #                             or equal to the searched key would skip
            #                             the searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to the searched node) will potentially
            # have to be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If the current node doesn't contain any further
        #                          references then the searched key is not present.
        # node.forward[0].key == key - The next node's key should equal the search
        #                              key if the key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete(self, key: KT) -> None:
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to the removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert(self, key: KT, value: VT) -> None:
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After a level increase we have to add additional references to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through the new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node
    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert() -> None:
    skip_list = SkipList()
    skip_list.insert('Key1', 3)
    skip_list.insert('Key2', 12)
    skip_list.insert('Key3', 41)
    skip_list.insert('Key4', -19)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value() -> None:
    skip_list = SkipList()
    skip_list.insert('Key1', 10)
    skip_list.insert('Key1', 12)
    skip_list.insert('Key5', 7)
    skip_list.insert('Key7', 10)
    skip_list.insert('Key10', 5)
    skip_list.insert('Key7', 7)
    skip_list.insert('Key5', 5)
    skip_list.insert('Key10', 10)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none() -> None:
    skip_list = SkipList()
    assert skip_list.find('Some key') is None
def test_search() -> None:
    skip_list = SkipList()
    skip_list.insert('Key2', 20)
    assert skip_list.find('Key2') == 20
    skip_list.insert('Some Key', 10)
    skip_list.insert('Key2', 8)
    skip_list.insert('V', 13)
    assert skip_list.find('Y') is None
    assert skip_list.find('Key2') == 8
    assert skip_list.find('Some Key') == 10
    assert skip_list.find('V') == 13
def test_deleting_item_from_empty_list_do_nothing() -> None:
    skip_list = SkipList()
    skip_list.delete('Some key')
    assert len(skip_list.head.forward) == 0
def test_deleted_items_are_not_founded_by_find_method() -> None:
    skip_list = SkipList()
    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 14)
    skip_list.insert('Key2', 15)
    skip_list.delete('V')
    skip_list.delete('Key2')
    assert skip_list.find('V') is None
    assert skip_list.find('Key2') is None
def test_delete_removes_only_given_key() -> None:
    skip_list = SkipList()
    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 14)
    skip_list.insert('Key2', 15)
    skip_list.delete('V')
    assert skip_list.find('V') is None
    assert skip_list.find('X') == 14
    assert skip_list.find('Key1') == 12
    assert skip_list.find('Key2') == 15
    skip_list.delete('X')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') == 12
    assert skip_list.find('Key2') == 15
    skip_list.delete('Key1')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') is None
    assert skip_list.find('Key2') == 15
    skip_list.delete('Key2')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') is None
    assert skip_list.find('Key2') is None
def test_delete_doesnt_leave_dead_nodes() -> None:
    skip_list = SkipList()
    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 142)
    skip_list.insert('Key2', 15)
    skip_list.delete('X')

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values() -> None:
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests() -> None:
    for _ in range(100):
        # Repeat each test 100 times due to the probabilistic nature of the skip
        # list: random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def main() -> None:
    skip_list = SkipList()
    skip_list.insert(2, '2')
    skip_list.insert(4, '4')
    skip_list.insert(6, '4')
    skip_list.insert(4, '5')
    skip_list.insert(8, '4')
    skip_list.insert(9, '4')
    skip_list.delete(4)
    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
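# Illustrative usage sketch of the SkipList above (the expected O(log n) search
# cost is probabilistic, driven by random_level()); `demo` is a throwaway name.
demo = SkipList()
for key, value in [(3, 'three'), (1, 'one'), (2, 'two')]:
    demo.insert(key, value)
assert list(demo) == [1, 2, 3]  # iteration yields keys in sorted order
assert demo.find(2) == 'two'
demo.delete(1)
assert demo.find(1) is None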
| 639
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self) -> None:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
    def test_save_load_optional_components(self) -> None:
        self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
    def test_save_load_float16(self) -> None:
        super().test_save_load_float16(expected_max_diff=1E-1)
    def test_attention_slicing_forward_pass(self) -> None:
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
    def test_save_load_local(self) -> None:
        self._test_save_load_local()
    def test_inference_batch_single_identical(self) -> None:
        self._test_inference_batch_single_identical(expected_max_diff=1E-2)
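# Illustrative standalone sketch of the device-aware seeding used in
# get_dummy_inputs above: MPS does not accept device-bound torch.Generator
# objects, hence the CPU fallback. `make_generator` is a hypothetical helper.
def make_generator(device, seed=0):
    if str(device).startswith('mps'):
        # torch.manual_seed seeds and returns the default (CPU) generator
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)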
| 639
| 1
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
UpperCAmelCase__ = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
UpperCAmelCase__ = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self) -> None:
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {'BertModelTest': 'BertModelTester'}
        expected_blip_mapping = {
            'BlipModelTest': 'BlipModelTester',
            'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
            'BlipTextModelTest': 'BlipTextModelTester',
            'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
            'BlipVQAModelTest': 'BlipVQAModelTester',
            'BlipVisionModelTest': 'BlipVisionModelTester',
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), expected_blip_mapping)
    def test_get_model_to_test_mapping(self) -> None:
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
            'BertForMaskedLM': ['BertModelTest'],
            'BertForMultipleChoice': ['BertModelTest'],
            'BertForNextSentencePrediction': ['BertModelTest'],
            'BertForPreTraining': ['BertModelTest'],
            'BertForQuestionAnswering': ['BertModelTest'],
            'BertForSequenceClassification': ['BertModelTest'],
            'BertForTokenClassification': ['BertModelTest'],
            'BertLMHeadModel': ['BertModelTest'],
            'BertModel': ['BertModelTest'],
        }
        expected_blip_mapping = {
            'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
            'BlipForQuestionAnswering': ['BlipVQAModelTest'],
            'BlipModel': ['BlipModelTest'],
            'BlipTextModel': ['BlipTextModelTest'],
            'BlipVisionModel': ['BlipVisionModelTest'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), expected_blip_mapping)
    def test_get_model_to_tester_mapping(self) -> None:
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
            'BertForMaskedLM': ['BertModelTester'],
            'BertForMultipleChoice': ['BertModelTester'],
            'BertForNextSentencePrediction': ['BertModelTester'],
            'BertForPreTraining': ['BertModelTester'],
            'BertForQuestionAnswering': ['BertModelTester'],
            'BertForSequenceClassification': ['BertModelTester'],
            'BertForTokenClassification': ['BertModelTester'],
            'BertLMHeadModel': ['BertModelTester'],
            'BertModel': ['BertModelTester'],
        }
        expected_blip_mapping = {
            'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
            'BlipForQuestionAnswering': ['BlipVQAModelTester'],
            'BlipModel': ['BlipModelTester'],
            'BlipTextModel': ['BlipTextModelTester'],
            'BlipVisionModel': ['BlipVisionModelTester'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), expected_blip_mapping)
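# Illustrative sketch of the naming convention the mappings above rely on: most
# `FooModelTest` classes pair with a `FooModelTester` obtained by extending the
# suffix (`BlipTextImageModelTest` -> `BlipTextImageModelsModelTester` is an
# exception in the data above). `tester_name_for` is a hypothetical helper.
def tester_name_for(test_class_name):
    assert test_class_name.endswith("Test")
    return test_class_name + "er"

assert tester_name_for("BertModelTest") == "BertModelTester"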
| 639
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]

def solution() -> int:
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, 'words.txt')
    words = ''
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip('\r\n').split(',')]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
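# Illustrative alternative to the precomputed table: t is triangular iff
# t = n(n + 1) / 2 for some integer n, i.e. iff 8t + 1 is a perfect square.
# `is_triangular` is a hypothetical helper, not part of the solution above.
import math

def is_triangular(t):
    if t < 1:
        return False
    root = math.isqrt(8 * t + 1)
    return root * root == 8 * t + 1

assert is_triangular(55)  # word value of "SKY": 19 + 11 + 25 = 55
assert not is_triangular(56)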
| 639
| 1
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.
PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
@contextlib.contextmanager
def context_en():
    print('Welcome!')
    yield
    print('Bye!')
@contextlib.contextmanager
def context_fr():
    print('Bonjour!')
    yield
    print('Au revoir!')
class TestImportMechanisms(unittest.TestCase):
    def test_module_discovery(self) -> None:
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('transformers') is not None
class GenericUtilTests(unittest.TestCase):
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print('Transformers are awesome!')
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), 'Transformers are awesome!\n')
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Welcome!\nTransformers are awesome!\nBye!\n')
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n')
@require_torch
    def test_find_labels_pt(self) -> None:
        self.assertEqual(find_labels(BertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(BertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(BertForQuestionAnswering), ['start_positions', 'end_positions'])

        # find_labels detects the framework through inheritance, regardless of the class name
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['labels'])
@require_tf
    def test_find_labels_tf(self) -> None:
        self.assertEqual(find_labels(TFBertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(TFBertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ['start_positions', 'end_positions'])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['labels'])
@require_flax
    def test_find_labels_flax(self) -> None:
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
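# Illustrative sketch of what a ContextManagers helper can look like; transformers
# builds it on contextlib.ExitStack, and this standalone re-implementation is an
# approximation for demonstration, not the library code.
from contextlib import ExitStack

class SimpleContextManagers:
    """Enter a list of context managers as one combined context."""

    def __init__(self, context_managers):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *exc):
        self.stack.__exit__(*exc)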
| 639
|
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
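# Illustrative equivalence check: Python's ^ operator computes XOR directly, so
# binary_xor can be validated against bin(a ^ b). (When a == b the string version
# keeps leading zeros, e.g. '0b000000' vs '0b0', so compare distinct operands.)
a, b = 25, 32
assert binary_xor(a, b) == bin(a ^ b) == '0b111001'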
| 639
| 1
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")
def get_hash(example):
    """Get the hash of a code example (md5 over whitespace-normalised content)."""
    return {"hash": hashlib.md5(re.sub(PATTERN, '', example['content']).encode('utf-8')).hexdigest()}
def line_stats(example):
    """Compute per-line length statistics of the code file."""
    line_lengths = [len(line) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """Compute the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example['content']])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example['hash'])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """Check if the first few lines mention that the file is auto-generated."""
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is a configuration or test file, via keywords and keyword counts."""
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test: explicit keywords in the first few lines
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: fraction of lines mentioning "config" or "test"
    nlines = example['content'].count('\n')
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count('config')
        count_test += line.lower().count('test')
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """Check if the file uses the symbol '=' fewer than `minimum` times."""
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=')
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example['content'], truncation=False)['input_ids']
    ratio = len(example['content']) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    """Filter the dataset with heuristics; config/test and low-signal files are dropped probabilistically."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, 'rb') as f_in:
        with gzip.open(str(file_path) + '.gz', 'wb', compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
    print(f"""Size of deduplicated dataset: {len(ds_filter)}""")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# Save duplicate_clusters in the output_dir as an artifact
# (not sure this is the right place to save it)
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"""file-{file_number+1:012}.json""")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
| 639
|
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
_UpperCAmelCase = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
print('\nFor string = ' , _UpperCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 639
| 1
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self) -> None:
        super().setUp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'tester'
        output_text = 'tester'
        return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
    def test_add_special_tokens(self) -> None:
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}"):
                special_token = '[SPECIAL_TOKEN]'
                tokenizer.add_special_tokens({'cls_token': special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self) -> None:
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_a = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_a)
                tokens_a = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_a), 0)
                text_a = tokenizer.decode(ids)
                self.assertIsInstance(text_a, str)
                self.assertEqual(text_a.replace(' ', ''), output_text)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
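# Illustrative sketch of the character-level vocabulary file the fixture above
# writes: a token -> id JSON map with one entry per character or special token.
# The file name below is a stand-in for the test's tmpdir path.
vocab_tokens = ['[GO]', '[s]'] + list('0123456789') + list('abcdefghijklmnopqrstuvwxyz')
vocab = {token: idx for idx, token in enumerate(vocab_tokens)}
with open('vocab.json', 'w', encoding='utf-8') as fp:
    fp.write(json.dumps(vocab) + '\n')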
| 639
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        }, )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                '--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    validation_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'}, )
    validation_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'}, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    validation_split_percentage: Optional[int] = field(
        default=5, metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        }, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated. Default to the max input length of the model.'
            )
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}, )
    mlm_probability: float = field(
        default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'})
    pad_to_max_length: bool = field(
        default=False, metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        }, )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, 'r', encoding='utf-8') as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
_UpperCAmelCase = {}
if data_args.train_file is not None:
_UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase = data_args.validation_file
_UpperCAmelCase = data_args.train_file.split('.' )[-1]
if extension == "txt":
_UpperCAmelCase = 'text'
_UpperCAmelCase = load_dataset(_UpperCAmelCase , data_files=_UpperCAmelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_UpperCAmelCase = AutoModelForMaskedLM.from_config(_UpperCAmelCase )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_UpperCAmelCase = datasets['train'].column_names
else:
_UpperCAmelCase = datasets['validation'].column_names
    text_column_name = 'text' if 'text' in column_names else column_names[0]
    padding = 'max_length' if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples['text'] = [line for line in examples['text'] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples['text'], padding=padding, truncation=True, max_length=data_args.max_seq_length)
_UpperCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
_UpperCAmelCase = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_UpperCAmelCase = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
_UpperCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_UpperCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
_UpperCAmelCase = DataCollatorForWholeWordMask(tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_UpperCAmelCase = model_args.model_name_or_path
else:
_UpperCAmelCase = None
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCAmelCase = trainer.evaluate()
_UpperCAmelCase = math.exp(eval_output['eval_loss'] )
_UpperCAmelCase = perplexity
_UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
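# Illustrative sketch of the whole-word-masking reference file consumed by
# add_chinese_references above: one JSON list per training example, holding the
# positions of sub-tokens that continue a word (format inferred from that
# function; the sample data and file name are hypothetical).
sample_refs = [[2, 3], [1]]
with open("train_ref.txt", "w", encoding="utf-8") as f:
    for ref in sample_refs:
        f.write(json.dumps(ref) + "\n")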
| 639
| 1
|
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float, ) -> tuple[str, float]:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
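# Illustrative calls covering the three solve modes of shear_stress above: pass 0
# for exactly one of (stress, tangential_force, area) and that quantity is solved
# for via tau = F / A.
assert shear_stress(stress=25, tangential_force=100, area=0) == ('area', 4.0)
assert shear_stress(stress=0, tangential_force=1600, area=200) == ('stress', 8.0)
assert shear_stress(stress=1000, tangential_force=0, area=1200) == ('tangential_force', 1200000.0)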
| 639
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}
SPIECE_UNDERLINE = "▁"

# Segment ids (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'
    slow_tokenizer_class = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs, ) -> None:
        # The mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
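# Illustrative trace of XLNet's sequence layout, which differs from BERT by
# placing <sep>/<cls> at the END: `A </s> B </s> <cls>`. The token ids below
# (sep=4, cls=3) are hypothetical stand-ins.
sep, cls = [4], [3]
token_ids_0, token_ids_1 = [10, 11], [20, 21, 22]
pair = token_ids_0 + sep + token_ids_1 + sep + cls
segments = len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + [2]
assert pair == [10, 11, 4, 20, 21, 22, 4, 3]
assert segments == [0, 0, 0, 1, 1, 1, 1, 2]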
| 639
| 1
|
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
UpperCAmelCase__ = "sshleifer/student_marian_en_ro_6_1"
UpperCAmelCase__ = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seqaseq_quick(self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True, ) -> None:
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MARIAN_MODEL, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, )
        logs = TrainerState.load_from_json(os.path.join(output_dir, 'trainer_state.json')).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if 'eval_loss' in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats['eval_bleu'], float)
            assert not math.isnan(float(last_step_stats['eval_loss'])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
    def test_run_seqaseq_no_dist(self) -> None:
        self.run_seqaseq_quick()
@require_torch_multi_gpu
    def test_run_seqaseq_dp(self) -> None:
        self.run_seqaseq_quick(distributed=False)
@require_torch_multi_gpu
    def test_run_seqaseq_ddp(self) -> None:
        self.run_seqaseq_quick(distributed=True)
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
    def test_run_seqaseq_sharded_ddp(self) -> None:
        self.run_seqaseq_quick(distributed=True, extra_args_str='--sharded_ddp simple')
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
    def test_run_seqaseq_sharded_ddp_fp16(self) -> None:
        self.run_seqaseq_quick(distributed=True, extra_args_str='--sharded_ddp simple --fp16')
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
    def test_run_seqaseq_fully_sharded_ddp(self) -> None:
        self.run_seqaseq_quick(distributed=True, extra_args_str='--sharded_ddp zero_dp_2', predict_with_generate=False)
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
    def test_run_seqaseq_fully_sharded_ddp_fp16(self) -> None:
        self.run_seqaseq_quick(
            distributed=True, extra_args_str='--sharded_ddp zero_dp_2 --fp16', predict_with_generate=False)
@require_apex
@require_torch_gpu
    def test_run_seqaseq_apex(self) -> None:
        self.run_seqaseq_quick(distributed=True, extra_args_str='--fp16 --fp16_backend=apex')
        # test a 2nd time - was getting `eval_loss: nan` the first time around;
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True, extra_args_str='--fp16 --fp16_backend=apex')
@parameterized.expand(['base', 'low', 'high', 'mixed'])
@require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id: str) -> None:
        experiments = {
            # test with the default log_level - should be info and thus log info once
            'base': {'extra_args_str': '', 'n_matches': 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
        }
        data = experiments[experiment_id]
        kwargs = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
        log_info_string = 'Running training'
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data['extra_args_str'])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data['n_matches'])
@slow
    def test_run_seqaseq(self) -> None:
        output_dir = self.run_trainer(
            eval_steps=2, max_len=1_28, model_name=MARIAN_MODEL, learning_rate=3E-4, num_train_epochs=10, distributed=False, )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, 'trainer_state.json')).log_history
        eval_metrics = [log for log in logs if 'eval_loss' in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['eval_bleu'], float)
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
@slow
@require_bitsandbytes
    def test_run_seqaseq_bnb(self) -> None:
        from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim: str) -> Tuple[int, int, float]:
            extra_args = '--skip_memory_metrics 0'
            output_dir = self.run_trainer(
                max_len=1_28, model_name=MARIAN_MODEL, learning_rate=3E-4, num_train_epochs=1, optim=optim, distributed=True, extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, 'trainer_state.json')).log_history
            gpu_peak_mem_mb = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20)
            loss = logs[0]['train_loss']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding` weights, which
        # don't get quantized and remain in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes, and the diff in optim memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between GPUs, let's check
        # that we have at least 120MB in savings
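        # A worked version of that arithmetic (a sketch, using the 25M quantized params above):
        #   Adam keeps two fp32 states per param:  25e6 * 2 * 4 bytes / 2**20 ~= 190.7 MiB
        #   bnb 8-bit Adam keeps two int8 states:  25e6 * 2 * 1 byte  / 2**20 ~=  47.7 MiB
        # The expected saving is ~143 MiB, so asserting >= 120MB leaves a small cushion per GPU.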
_UpperCAmelCase = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
A , A , 'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
A , A , 'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
A , A , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
def _lowerCamelCase ( self : Union[str, Any] , A : int , A : str , A : int , A : float = 3E-3 , A : str = "adafactor" , A : bool = False , A : str = None , A : int = 0 , A : bool = True , A : bool = True , A : bool = True , A : bool = True , A : int = None , ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
_UpperCAmelCase = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A)}\n ".split()
_UpperCAmelCase = '\n --do_predict\n '.split()
_UpperCAmelCase = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_UpperCAmelCase = get_gpu_count()
_UpperCAmelCase = get_torch_dist_unique_port()
_UpperCAmelCase = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
_UpperCAmelCase = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A , env=self.get_env())
else:
_UpperCAmelCase = ['run_translation.py'] + args
with patch.object(A , 'argv' , A):
main()
return output_dir
| 639
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["YolosFeatureExtractor"]
UpperCAmelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 1
|
def A ( matrix : list[list[int | float]] ) -> int:
    '''simple docstring'''
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row] , matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
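# Minimal usage sketch (illustrative input; note the routine mutates `matrix` in place):
# A([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]) returns 2, because the third
# row is a linear combination of the first two, so only two pivots survive elimination.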
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase__ = re.compile(r"\s+")
def A ( _UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(_UpperCAmelCase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = [len(_UpperCAmelCase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(_UpperCAmelCase ), "line_max": max(_UpperCAmelCase )}
def A ( _UpperCAmelCase : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=5 ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = ['auto-generated', 'autogenerated', 'automatically generated']
_UpperCAmelCase = example['content'].splitlines()
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Optional[int]=0.05 ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['unit tests', 'test file', 'configuration file']
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# first test
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_UpperCAmelCase = example['content'].count('\n' )
_UpperCAmelCase = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A ( _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['def ', 'class ', 'for ', 'while ']
_UpperCAmelCase = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=4 ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A ( _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = tokenizer(example['content'] , truncation=_UpperCAmelCase )['input_ids']
_UpperCAmelCase = len(example['content'] ) / len(_UpperCAmelCase )
return {"ratio": ratio}
def A ( _UpperCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = {}
results.update(get_hash(_UpperCAmelCase ) )
results.update(line_stats(_UpperCAmelCase ) )
results.update(alpha_stats(_UpperCAmelCase ) )
results.update(char_token_ratio(_UpperCAmelCase ) )
results.update(is_autogenerated(_UpperCAmelCase ) )
results.update(is_config_or_test(_UpperCAmelCase ) )
results.update(has_no_keywords(_UpperCAmelCase ) )
results.update(has_few_assignments(_UpperCAmelCase ) )
return results
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Any:
'''simple docstring'''
if not check_uniques(_UpperCAmelCase , _UpperCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A ( _UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
with open(_UpperCAmelCase , 'rb' ) as f_in:
with gzip.open(str(_UpperCAmelCase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(_UpperCAmelCase , _UpperCAmelCase )
os.unlink(_UpperCAmelCase )
# Settings
UpperCAmelCase__ = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
UpperCAmelCase__ = set(ds.unique("hash"))
UpperCAmelCase__ = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase__ = time.time()
UpperCAmelCase__ , UpperCAmelCase__ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
UpperCAmelCase__ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure this is the right place to save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase__ = output_dir / "data"
data_dir.mkdir(exist_ok=True)
UpperCAmelCase__ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase__ = str(data_dir / f"""file-{file_number+1:012}.json""")
UpperCAmelCase__ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
| 639
| 1
|
def A ( input_str : str , use_pascal : bool = False ) -> str:
    '''simple docstring'''
    if not isinstance(input_str , str ):
        msg = F"Expected string as input, found {type(input_str )}"
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = F"Expected boolean as use_pascal parameter, found {type(use_pascal )}"
        raise ValueError(msg )
    words = input_str.split('_' )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 639
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = "https://openaipublic.azureedge.net/jukebox/models/"
UpperCAmelCase__ = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_UpperCAmelCase = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_UpperCAmelCase = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_UpperCAmelCase = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_UpperCAmelCase = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
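# Illustrative behaviour of the mapping above (these keys are hypothetical but follow the
# checkpoint's naming scheme; they are not taken from a real state dict):
#   'vqvae.bottleneck.level_blocks.0.k' -> 'vqvae.bottleneck.level_blocks.0.codebook'
#   'prior.x_out.weight'                -> 'prior.fc_proj_out.weight'
# Keys that match none of the patterns fall through unchanged via the final `return key`.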
def A ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
import re
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_conv_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_conv_in.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_encoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_proj_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_proj_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_proj_out.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_decoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_prior_cond_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# keep original key
else:
_UpperCAmelCase = original_key
_UpperCAmelCase = replace_key(_UpperCAmelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shape
        elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
            _UpperCAmelCase = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match" )
_UpperCAmelCase = original_key
_UpperCAmelCase = original_key
_UpperCAmelCase = value
return new_dict
@torch.no_grad()
def A ( _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Dict=None ) -> Dict:
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_UpperCAmelCase = requests.get(F"{PREFIX}{file}" , allow_redirects=_UpperCAmelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_UpperCAmelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , 'wb' ).write(r.content )
_UpperCAmelCase = MODEL_MAPPING[model_name.split('/' )[-1]]
_UpperCAmelCase = JukeboxConfig.from_pretrained(_UpperCAmelCase )
_UpperCAmelCase = JukeboxModel(_UpperCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = {}
for i, dict_name in enumerate(_UpperCAmelCase ):
_UpperCAmelCase = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
_UpperCAmelCase = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_UpperCAmelCase = old_dic[k]
elif k.endswith('.w' ):
_UpperCAmelCase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_UpperCAmelCase = old_dic[k]
else:
_UpperCAmelCase = old_dic[k]
_UpperCAmelCase = 'vqvae' if i == 0 else F"priors.{3 - i}"
_UpperCAmelCase = fix_jukebox_keys(_UpperCAmelCase , model.state_dict() , _UpperCAmelCase , _UpperCAmelCase )
weight_dict.append(_UpperCAmelCase )
_UpperCAmelCase = weight_dict.pop(0 )
model.vqvae.load_state_dict(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , 'w' ) as txtfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
return weight_dict
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
UpperCAmelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 639
| 1
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __lowerCAmelCase ( A ):
def _lowerCamelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(A , 'hidden_sizes'))
self.parent.assertTrue(hasattr(A , 'num_attention_heads'))
self.parent.assertTrue(hasattr(A , 'num_encoder_blocks'))
class __lowerCAmelCase :
def __init__( self : Optional[Any] , A : Dict , A : List[str]=13 , A : Any=64 , A : Dict=3 , A : Union[str, Any]=4 , A : Optional[int]=[2, 2, 2, 2] , A : Tuple=[8, 4, 2, 1] , A : Union[str, Any]=[16, 32, 64, 1_28] , A : str=[1, 4, 8, 16] , A : List[str]=[1, 2, 4, 8] , A : Tuple=True , A : str=True , A : int="gelu" , A : int=0.1 , A : List[Any]=0.1 , A : Dict=0.0_2 , A : str=3 , A : Dict=None , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = num_encoder_blocks
_UpperCAmelCase = sr_ratios
_UpperCAmelCase = depths
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = downsampling_rates
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
def _lowerCamelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : Dict) -> str:
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Tuple , A : Union[str, Any] , A : str , A : int) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = SegformerModel(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A)
_UpperCAmelCase = _UpperCAmelCase = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
def _lowerCamelCase ( self : Any , A : Dict , A : int , A : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = SegformerForSemanticSegmentation(A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
_UpperCAmelCase = model(A , labels=A)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
self.parent.assertGreater(result.loss , 0.0)
def _lowerCamelCase ( self : Optional[Any] , A : Optional[Any] , A : Dict , A : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = 1
_UpperCAmelCase = SegformerForSemanticSegmentation(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size)).to(A)
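        # torch.randint's upper bound is exclusive, so (0, 1) yields an all-zeros label map;
        # that is enough to exercise the binary (num_labels == 1) loss path asserted below.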
_UpperCAmelCase = model(A , labels=A)
self.parent.assertGreater(result.loss , 0.0)
def _lowerCamelCase ( self : Any) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def _lowerCamelCase ( self : Optional[int]) -> str:
"""simple docstring"""
_UpperCAmelCase = SegformerModelTester(self)
_UpperCAmelCase = SegformerConfigTester(self , config_class=A)
def _lowerCamelCase ( self : str) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A)
def _lowerCamelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*A)
def _lowerCamelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*A)
@unittest.skip('SegFormer does not use inputs_embeds')
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
pass
    @unittest.skip('SegFormer does not have get_input_embeddings and get_output_embeddings methods')
def _lowerCamelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
pass
def _lowerCamelCase ( self : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(A)
_UpperCAmelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A)
def _lowerCamelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
_UpperCAmelCase = outputs.attentions
_UpperCAmelCase = sum(self.model_tester.depths)
self.assertEqual(len(A) , A)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(A) , A)
# verify the first attentions (first block, first layer)
_UpperCAmelCase = (self.model_tester.image_size // 4) ** 2
_UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
_UpperCAmelCase = (self.model_tester.image_size // 32) ** 2
_UpperCAmelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
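        # With this tester's defaults (image_size=64, sr_ratios=[8, 4, 2, 1]), the shapes above
        # work out to: first block 16*16=256 queries reduced to 2*2=4 keys; last block
        # 2*2=4 queries with sr_ratio 1, i.e. no key reduction. (A worked sketch, not an assertion.)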
_UpperCAmelCase = len(A)
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
self.assertEqual(out_len + 1 , len(A))
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(A) , A)
# verify the first attentions (first block, first layer)
_UpperCAmelCase = (self.model_tester.image_size // 4) ** 2
_UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(A : str , A : List[Any] , A : str):
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_encoder_blocks
self.assertEqual(len(A) , A)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(A , A , A)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(A , A , A)
def _lowerCamelCase ( self : str) -> Optional[int]:
"""simple docstring"""
if not self.model_tester.is_training:
return
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(A):
continue
_UpperCAmelCase = model_class(A)
model.to(A)
model.train()
_UpperCAmelCase = self._prepare_for_class(A , A , return_labels=A)
_UpperCAmelCase = model(**A).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def _lowerCamelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
pass
@slow
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = SegformerModel.from_pretrained(A)
self.assertIsNotNone(A)
def A ( ) -> int:
'''simple docstring'''
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=A , align=A , do_random_crop=A)
_UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
A)
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='pt')
_UpperCAmelCase = encoded_inputs.pixel_values.to(A)
with torch.no_grad():
_UpperCAmelCase = model(A)
_UpperCAmelCase = torch.Size((1, model.config.num_labels, 1_28, 1_28))
self.assertEqual(outputs.logits.shape , A)
_UpperCAmelCase = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
]).to(A)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , A , atol=1E-4))
@slow
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=A , align=A , do_random_crop=A)
_UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024').to(A)
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='pt')
_UpperCAmelCase = encoded_inputs.pixel_values.to(A)
with torch.no_grad():
_UpperCAmelCase = model(A)
_UpperCAmelCase = torch.Size((1, model.config.num_labels, 1_28, 1_28))
self.assertEqual(outputs.logits.shape , A)
_UpperCAmelCase = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
]).to(A)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , A , atol=1E-1))
@slow
def _lowerCamelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=A , align=A , do_random_crop=A)
_UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
A)
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='pt')
_UpperCAmelCase = encoded_inputs.pixel_values.to(A)
with torch.no_grad():
_UpperCAmelCase = model(A)
_UpperCAmelCase = outputs.logits.detach().cpu()
_UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(5_00, 3_00)])
_UpperCAmelCase = torch.Size((5_00, 3_00))
self.assertEqual(segmentation[0].shape , A)
_UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=A)
_UpperCAmelCase = torch.Size((1_28, 1_28))
self.assertEqual(segmentation[0].shape , A)
| 639
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
@staticmethod
def _lowerCamelCase ( *A : Union[str, Any] , **A : List[Any]) -> Union[str, Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@require_torch
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
        # The floating-point scores are so close that we are within floating-point error, so the order is not
        # guaranteed across python and torch versions.
self.assertIn(
nested_simplify(A) , [
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@require_tf
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
self.assertEqual(
nested_simplify(A) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@slow
@require_torch
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
| 639
| 1
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class __lowerCAmelCase ( A ):
UpperCamelCase = '''encodec'''
def __init__( self : List[str] , A : List[str]=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , A : Union[str, Any]=2_40_00 , A : Optional[int]=1 , A : Optional[Any]=False , A : Any=None , A : Optional[Any]=None , A : int=1_28 , A : int=32 , A : int=1 , A : Dict=[8, 5, 4, 2] , A : Optional[int]="weight_norm" , A : str=7 , A : Optional[int]=7 , A : Union[str, Any]=3 , A : Optional[Any]=2 , A : Optional[Any]=True , A : Tuple="reflect" , A : Tuple=2 , A : List[str]=2 , A : Optional[Any]=1.0 , A : Any=10_24 , A : Union[str, Any]=None , A : List[Any]=True , **A : Any , ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = target_bandwidths
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = audio_channels
_UpperCAmelCase = normalize
_UpperCAmelCase = chunk_length_s
_UpperCAmelCase = overlap
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_filters
_UpperCAmelCase = num_residual_layers
_UpperCAmelCase = upsampling_ratios
_UpperCAmelCase = norm_type
_UpperCAmelCase = kernel_size
_UpperCAmelCase = last_kernel_size
_UpperCAmelCase = residual_kernel_size
_UpperCAmelCase = dilation_growth_rate
_UpperCAmelCase = use_causal_conv
_UpperCAmelCase = pad_mode
_UpperCAmelCase = compress
_UpperCAmelCase = num_lstm_layers
_UpperCAmelCase = trim_right_ratio
_UpperCAmelCase = codebook_size
_UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
_UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}")
super().__init__(**A)
@property
def _lowerCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate)
@property
def _lowerCamelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length))
@property
def _lowerCamelCase ( self : Any) -> int:
"""simple docstring"""
_UpperCAmelCase = np.prod(self.upsampling_ratios)
return math.ceil(self.sampling_rate / hop_length)
@property
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10))
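# Worked numbers for the 24kHz defaults above (a sketch): the hop length is
# prod([8, 5, 4, 2]) = 320, so frame_rate = ceil(24000 / 320) = 75, and the last property
# evaluates to int(1000 * 24.0 // (75 * 10)) = 32 quantizer codebooks at the 24.0 bandwidth.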
| 639
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 1
|
from itertools import product
def total_frequency_distribution ( sides_number : int , dice_number : int ) -> list[int]:
    '''simple docstring'''
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(faces_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution ( ) -> float:
    '''simple docstring'''
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
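# Intuition for the loop above (Project Euler 205): Peter's totals run from 9 (all ones on
# nine 4-sided dice) to 36, Colin's from 6 to 36, and Peter wins a game exactly when his
# total strictly exceeds Colin's -- hence the slice colin_totals_frequencies[min_colin_total:peter_total].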
if __name__ == "__main__":
print(f"""{solution() = }""")
| 639
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def A ( _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase = filter(lambda _UpperCAmelCase : p.requires_grad , model.parameters() )
_UpperCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCAmelCase__ = logging.getLogger(__name__)
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
if metric == "rouge2":
_UpperCAmelCase = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
_UpperCAmelCase = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
_UpperCAmelCase = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
_UpperCAmelCase = '{val_avg_loss:.4f}-{step_count}'
else:
        raise NotImplementedError(
            F"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by"
            ' adding to this function.' )
_UpperCAmelCase = ModelCheckpoint(
dirpath=_UpperCAmelCase , filename=_UpperCAmelCase , monitor=F"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
return EarlyStopping(
monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=_UpperCAmelCase , verbose=_UpperCAmelCase , )
class __lowerCAmelCase ( pl.Callback ):
def _lowerCamelCase ( self : Optional[int] , A : List[Any] , A : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(A)
@rank_zero_only
def _lowerCamelCase ( self : Optional[Any] , A : pl.Trainer , A : pl.LightningModule , A : str , A : int=True) -> None:
"""simple docstring"""
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****")
_UpperCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir)
if type_path == "test":
_UpperCAmelCase = od / 'test_results.txt'
_UpperCAmelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
_UpperCAmelCase = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=A)
generations_file.parent.mkdir(exist_ok=A)
with open(A , 'a+') as writer:
for key in sorted(A):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase = metrics[key]
if isinstance(A , torch.Tensor):
_UpperCAmelCase = val.item()
_UpperCAmelCase = F"{key}: {val:.6f}\n"
writer.write(A)
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase = '\n'.join(metrics['preds'])
generations_file.open('w+').write(A)
@rank_zero_only
def _lowerCamelCase ( self : str , A : Optional[int] , A : List[str]) -> Optional[Any]:
"""simple docstring"""
try:
_UpperCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase = pl_module.model.num_parameters()
_UpperCAmelCase = count_trainable_parameters(A)
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
@rank_zero_only
def _lowerCamelCase ( self : Dict , A : pl.Trainer , A : pl.LightningModule) -> int:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(A , A , 'test')
@rank_zero_only
def _lowerCamelCase ( self : Tuple , A : pl.Trainer , A : str) -> Dict:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 639
| 1
|
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
def __init__( self : List[Any] , A : List[Any] , A : int=13 , A : Union[str, Any]=7 , A : List[Any]=True , A : List[Any]=True , A : List[str]=True , A : List[Any]=True , A : int=99 , A : Dict=32 , A : Tuple=5 , A : Optional[int]=4 , A : Optional[int]=37 , A : List[Any]="gelu" , A : Dict=0.1 , A : List[Any]=0.1 , A : Optional[Any]=5_12 , A : int=16 , A : str=2 , A : int=0.0_2 , A : Optional[Any]=3 , A : Tuple=4 , A : List[Any]=None , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def _lowerCamelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Tuple) -> Any:
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Any , A : Optional[int] , A : int , A : List[Any] , A : Union[str, Any] , A : Any , A : List[str] , A : str) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = NystromformerModel(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A)
_UpperCAmelCase = model(A , token_type_ids=A)
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : Optional[Any] , A : int , A : Any , A : Dict , A : Union[str, Any] , A : List[Any] , A : List[Any] , A : str) -> int:
"""simple docstring"""
_UpperCAmelCase = NystromformerForMaskedLM(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A , labels=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCamelCase ( self : Tuple , A : List[Any] , A : int , A : Union[str, Any] , A : int , A : str , A : Tuple , A : str) -> Dict:
"""simple docstring"""
_UpperCAmelCase = NystromformerForQuestionAnswering(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(
A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCamelCase ( self : Union[str, Any] , A : Optional[Any] , A : List[Any] , A : Union[str, Any] , A : str , A : List[str] , A : str , A : Any) -> int:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = NystromformerForSequenceClassification(A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A , labels=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : List[Any] , A : Optional[Any] , A : List[Any] , A : Optional[int] , A : Tuple , A : int , A : int , A : List[str]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = NystromformerForTokenClassification(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A , labels=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCamelCase ( self : List[Any] , A : str , A : Tuple , A : Optional[int] , A : int , A : str , A : List[str] , A : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = NystromformerForMultipleChoice(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCAmelCase = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCamelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
_UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': NystromformerModel,
'''fill-mask''': NystromformerForMaskedLM,
'''question-answering''': NystromformerForQuestionAnswering,
'''text-classification''': NystromformerForSequenceClassification,
'''token-classification''': NystromformerForTokenClassification,
'''zero-shot''': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
def _lowerCamelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = NystromformerModelTester(self)
_UpperCAmelCase = ConfigTester(self , config_class=A , hidden_size=37)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A)
def _lowerCamelCase ( self : Any) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*A)
def _lowerCamelCase ( self : Optional[int]) -> int:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A)
def _lowerCamelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A)
def _lowerCamelCase ( self : int) -> int:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A)
def _lowerCamelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A)
@slow
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = NystromformerModel.from_pretrained(A)
self.assertIsNotNone(A)
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = NystromformerModel.from_pretrained('uw-madison/nystromformer-512')
_UpperCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]])
with torch.no_grad():
_UpperCAmelCase = model(A)[0]
_UpperCAmelCase = torch.Size((1, 6, 7_68))
self.assertEqual(output.shape , A)
_UpperCAmelCase = torch.tensor(
[[[-0.4_5_3_2, -0.0_9_3_6, 0.5_1_3_7], [-0.2_6_7_6, 0.0_6_2_8, 0.6_1_8_6], [-0.3_6_2_9, -0.1_7_2_6, 0.4_7_1_6]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , A , atol=1E-4))
@slow
def _lowerCamelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = 'the [MASK] of Belgium is Brussels'
_UpperCAmelCase = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512')
_UpperCAmelCase = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512')
_UpperCAmelCase = tokenizer(A , return_tensors='pt')
with torch.no_grad():
_UpperCAmelCase = model(encoding.input_ids).logits
_UpperCAmelCase = token_logits[:, 2, :].argmax(-1)[0]
self.assertEqual(tokenizer.decode(A) , 'capital')
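# Equivalent check through the high-level pipeline API (illustrative sketch, not part
# of the test suite):
#     from transformers import pipeline
#     fill = pipeline('fill-mask', model='uw-madison/nystromformer-512')
#     fill('the [MASK] of Belgium is Brussels')[0]['token_str']  # expected: 'capital'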
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = MgpstrTokenizer
UpperCamelCase = False
UpperCamelCase = {}
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
super().setUp()
# fmt: off
_UpperCAmelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
def _lowerCamelCase ( self : Dict , **A : List[Any]) -> Optional[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : List[str] , A : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'tester'
_UpperCAmelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers(do_lower_case=A)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token})
_UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=A)
self.assertEqual(len(A) , 1)
_UpperCAmelCase = tokenizer.decode(A , skip_special_tokens=A)
self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(A)
_UpperCAmelCase = tokenizer.tokenize(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
_UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertNotEqual(len(A) , 0)
_UpperCAmelCase = tokenizer.decode(A)
self.assertIsInstance(A , A)
self.assertEqual(text_a.replace(' ' , '') , A)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class __lowerCAmelCase ( A ):
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
class __lowerCAmelCase ( A ):
def __init__( self : Optional[Any] , A : List[Any]=1 , A : List[Any]=0 , A : Optional[int]=2 , A : str=5_12 , A : List[Any]="cls" , A : int=False , A : Tuple=True , **A : List[str] , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A)
_UpperCAmelCase = project_dim
_UpperCAmelCase = pooler_fn
_UpperCAmelCase = learn_encoder
_UpperCAmelCase = use_attention_mask
class __lowerCAmelCase ( A ):
UpperCamelCase = [R'''pooler''', R'''logit_scale''']
UpperCamelCase = [R'''position_ids''', R'''predictions.decoder.bias''']
UpperCamelCase = '''roberta'''
UpperCamelCase = RobertaSeriesConfig
def __init__( self : Any , A : Dict) -> Any:
"""simple docstring"""
super().__init__(A)
_UpperCAmelCase = XLMRobertaModel(A)
_UpperCAmelCase = nn.Linear(config.hidden_size , config.project_dim)
_UpperCAmelCase = getattr(A , 'has_pre_transformation' , A)
if self.has_pre_transformation:
_UpperCAmelCase = nn.Linear(config.hidden_size , config.project_dim)
_UpperCAmelCase = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps)
self.post_init()
def _lowerCamelCase ( self : List[Any] , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[bool] = None , A : Optional[bool] = None , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = self.base_model(
input_ids=A , attention_mask=A , token_type_ids=A , position_ids=A , head_mask=A , inputs_embeds=A , encoder_hidden_states=A , encoder_attention_mask=A , output_attentions=A , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=A , )
if self.has_pre_transformation:
_UpperCAmelCase = outputs['hidden_states'][-2]
_UpperCAmelCase = self.pre_LN(A)
_UpperCAmelCase = self.transformation_pre(A)
return TransformationModelOutput(
projection_state=A , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
_UpperCAmelCase = self.transformation(outputs.last_hidden_state)
return TransformationModelOutput(
projection_state=A , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
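# Minimal usage sketch (the checkpoint path and model class name are assumptions,
# not shown in this file):
#     config = RobertaSeriesConfig.from_pretrained('<alt-diffusion-text-encoder>')
#     encoder = RobertaSeriesModelWithTransformation(config)  # class name assumed
#     out = encoder(input_ids=input_ids, attention_mask=attention_mask)
#     conditioning = out.projection_state  # projected from hidden_size to project_dim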
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = {"facebook/bart-base": BartForConditionalGeneration}
UpperCAmelCase__ = {"facebook/bart-base": BartTokenizer}
def A ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=_UpperCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCAmelCase , )
parser.add_argument(
'--config_name' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=_UpperCAmelCase , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Where to store the final ONNX file.' )
_UpperCAmelCase = parser.parse_args()
return args
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]="cpu" ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = model_dict[model_name].from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_UpperCAmelCase ) )
with torch.no_grad():
_UpperCAmelCase = 'My friends are cool but they eat too many carbs.'
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_UpperCAmelCase = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=_UpperCAmelCase , max_length=_UpperCAmelCase , early_stopping=_UpperCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_UpperCAmelCase , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _UpperCAmelCase , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=_UpperCAmelCase , )
logger.info('Model exported to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_UpperCAmelCase ) )
logger.info('Deduplicated and optimized model written to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_UpperCAmelCase )
_UpperCAmelCase = ort_sess.run(
_UpperCAmelCase , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(_UpperCAmelCase ),
'max_length': np.array(_UpperCAmelCase ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def A ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase , _UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _UpperCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(_UpperCAmelCase )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
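# Example invocation (script name and paths are placeholders):
#     python export_bart_onnx.py \
#         --model_name_or_path facebook/bart-base \
#         --num_beams 4 --max_length 5 \
#         --device cpu --output_file_path BART.onnx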
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __lowerCAmelCase :
def __init__( self : Union[str, Any] , A : Union[str, Any] , A : Optional[int]=13 , A : int=30 , A : Union[str, Any]=2 , A : Dict=3 , A : Optional[int]=True , A : Optional[int]=True , A : Dict=32 , A : List[str]=5 , A : str=4 , A : List[str]=37 , A : Union[str, Any]="gelu" , A : Tuple=0.1 , A : Optional[int]=0.1 , A : List[str]=10 , A : List[str]=0.0_2 , A : Any=3 , A : Any=None , A : int=2 , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = scope
_UpperCAmelCase = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 2
def _lowerCamelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : List[str]) -> Dict:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowerCamelCase ( self : Optional[Any] , A : List[Any] , A : Optional[int] , A : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = DeiTModel(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : int , A : Union[str, Any] , A : Tuple , A : str) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = DeiTForMaskedImageModeling(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = DeiTForMaskedImageModeling(A)
model.to(A)
model.eval()
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _lowerCamelCase ( self : Optional[int] , A : Union[str, Any] , A : Union[str, Any] , A : str) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.type_sequence_label_size
_UpperCAmelCase = DeiTForImageClassification(A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , labels=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = DeiTForImageClassification(A)
model.to(A)
model.eval()
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCAmelCase = model(A , labels=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCamelCase ( self : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = DeiTModelTester(self)
_UpperCAmelCase = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37)
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds')
def _lowerCamelCase ( self : List[str]) -> Dict:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(A)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear))
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(A)
_UpperCAmelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A)
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A)
def _lowerCamelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A)
def _lowerCamelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A)
def _lowerCamelCase ( self : Union[str, Any] , A : Optional[Any] , A : List[str] , A : str=False) -> Dict:
"""simple docstring"""
_UpperCAmelCase = super()._prepare_for_class(A , A , return_labels=A)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
if not self.model_tester.is_training:
return
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(A)
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
_UpperCAmelCase = model_class(A)
model.to(A)
model.train()
_UpperCAmelCase = self._prepare_for_class(A , A , return_labels=A)
_UpperCAmelCase = model(**A).loss
loss.backward()
def _lowerCamelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_UpperCAmelCase = False
_UpperCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(A) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
_UpperCAmelCase = model_class(A)
model.gradient_checkpointing_enable()
model.to(A)
model.train()
_UpperCAmelCase = self._prepare_for_class(A , A , return_labels=A)
_UpperCAmelCase = model(**A).loss
loss.backward()
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(A),
*get_values(A),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}"):
_UpperCAmelCase = problem_type['title']
_UpperCAmelCase = problem_type['num_labels']
                    model = model_class(A)
                    model.to(A)
                    model.train()
                    inputs = self._prepare_for_class(A , A , return_labels=A)
                    if problem_type["num_labels"] > 1:
                        inputs['labels'] = inputs['labels'].unsqueeze(1).repeat(1 , problem_type['num_labels'])
                    inputs['labels'] = inputs['labels'].to(problem_type['dtype'])
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=A) as warning_list:
                        loss = model(**inputs).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}")
loss.backward()
@slow
def _lowerCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = DeiTModel.from_pretrained(A)
self.assertIsNotNone(A)
def A ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self : Any) -> int:
"""simple docstring"""
_UpperCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224').to(
A)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='pt').to(A)
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**A)
# verify the logits
_UpperCAmelCase = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , A)
_UpperCAmelCase = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1]).to(A)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4))
@slow
@require_accelerate
@require_torch_gpu
def _lowerCamelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = DeiTModel.from_pretrained(
            'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.float16 , device_map='auto')
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='pt')
_UpperCAmelCase = inputs.pixel_values.to(A)
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCAmelCase = model(A)
def gnome_sort(lst: list) -> list:
    '''Sort lst in place with gnome sort and return it.'''
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
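# Worked example (illustrative): gnome_sort([3, 1, 2]) proceeds
#     [3, 1, 2] -> swap -> [1, 3, 2] -> advance -> swap -> [1, 2, 3]
# using O(n^2) comparisons in the worst case and O(1) extra space.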
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self : List[Any] , A : Tuple , A : int=7 , A : str=3 , A : List[Any]=30 , A : str=4_00 , A : Tuple=True , A : str=None , A : Tuple=True , A : List[str]=[0.5, 0.5, 0.5] , A : int=[0.5, 0.5, 0.5] , A : List[str]=True , A : List[str]=1 / 2_55 , A : List[Any]=True , ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_pad
def _lowerCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowerCamelCase ( self : Tuple , A : int , A : int=False) -> Dict:
"""simple docstring"""
if not batched:
_UpperCAmelCase = image_inputs[0]
if isinstance(A , Image.Image):
_UpperCAmelCase , _UpperCAmelCase = image.size
else:
_UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
_UpperCAmelCase = int(self.size['shortest_edge'] * h / w)
_UpperCAmelCase = self.size['shortest_edge']
elif w > h:
_UpperCAmelCase = self.size['shortest_edge']
_UpperCAmelCase = int(self.size['shortest_edge'] * w / h)
else:
_UpperCAmelCase = self.size['shortest_edge']
_UpperCAmelCase = self.size['shortest_edge']
else:
_UpperCAmelCase = []
for image in image_inputs:
_UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
_UpperCAmelCase = max(A , key=lambda A: item[0])[0]
_UpperCAmelCase = max(A , key=lambda A: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = DeformableDetrImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = DeformableDetrImageProcessingTester(self)
@property
def _lowerCamelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(A , 'image_mean'))
self.assertTrue(hasattr(A , 'image_std'))
self.assertTrue(hasattr(A , 'do_normalize'))
self.assertTrue(hasattr(A , 'do_resize'))
self.assertTrue(hasattr(A , 'do_rescale'))
self.assertTrue(hasattr(A , 'do_pad'))
self.assertTrue(hasattr(A , 'size'))
def _lowerCamelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33})
self.assertEqual(image_processor.do_pad , A)
_UpperCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A)
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84})
self.assertEqual(image_processor.do_pad , A)
def _lowerCamelCase ( self : List[str]) -> Dict:
"""simple docstring"""
pass
def _lowerCamelCase ( self : List[str]) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A)
for image in image_inputs:
self.assertIsInstance(A , Image.Image)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(A)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(A , batched=A)
_UpperCAmelCase = image_processing(A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A)
for image in image_inputs:
self.assertIsInstance(A , np.ndarray)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(A)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(A , return_tensors='pt').pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(A , batched=A)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self : List[str]) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A)
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(A)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(A , return_tensors='pt').pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(A , batched=A)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r') as f:
_UpperCAmelCase = json.loads(f.read())
_UpperCAmelCase = {'image_id': 3_97_69, 'annotations': target}
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor()
_UpperCAmelCase = image_processing(images=A , annotations=A , return_tensors='pt')
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 8_00, 10_66])
self.assertEqual(encoding['pixel_values'].shape , A)
_UpperCAmelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , A , atol=1E-4))
# verify area
_UpperCAmelCase = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , A))
# verify boxes
_UpperCAmelCase = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , A)
_UpperCAmelCase = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , A , atol=1E-3))
# verify image_id
_UpperCAmelCase = torch.tensor([3_97_69])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , A))
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , A))
# verify class_labels
_UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , A))
# verify orig_size
_UpperCAmelCase = torch.tensor([4_80, 6_40])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , A))
# verify size
_UpperCAmelCase = torch.tensor([8_00, 10_66])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , A))
@slow
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r') as f:
_UpperCAmelCase = json.loads(f.read())
_UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
_UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic')
_UpperCAmelCase = image_processing(images=A , annotations=A , masks_path=A , return_tensors='pt')
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 8_00, 10_66])
self.assertEqual(encoding['pixel_values'].shape , A)
_UpperCAmelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , A , atol=1E-4))
# verify area
_UpperCAmelCase = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , A))
# verify boxes
_UpperCAmelCase = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , A)
_UpperCAmelCase = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , A , atol=1E-3))
# verify image_id
_UpperCAmelCase = torch.tensor([3_97_69])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , A))
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , A))
# verify class_labels
_UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , A))
# verify masks
_UpperCAmelCase = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , A)
# verify orig_size
_UpperCAmelCase = torch.tensor([4_80, 6_40])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , A))
# verify size
_UpperCAmelCase = torch.tensor([8_00, 10_66])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , A))
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Tuple="pt" ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = {'add_prefix_space': True} if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not line.startswith(' ' ) else {}
_UpperCAmelCase = padding_side
return tokenizer(
[line] , max_length=_UpperCAmelCase , padding='max_length' if pad_to_max_length else None , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=None , ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = input_ids.ne(_UpperCAmelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( A ):
def __init__( self : Union[str, Any] , A : Union[str, Any] , A : Optional[int] , A : str , A : Union[str, Any] , A : int="train" , A : List[Any]=None , A : int=None , A : Tuple=None , A : str="" , ) -> List[Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = Path(A).joinpath(type_path + '.source')
_UpperCAmelCase = Path(A).joinpath(type_path + '.target')
_UpperCAmelCase = self.get_char_lens(self.src_file)
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self : Tuple) -> Optional[int]:
"""simple docstring"""
return len(self.src_lens)
def __getitem__( self : Any , A : Dict) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file) , A).rstrip('\n')
_UpperCAmelCase = linecache.getline(str(self.tgt_file) , A).rstrip('\n')
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , A):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , A) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , A) else self.tokenizer
_UpperCAmelCase = encode_line(A , A , self.max_source_length , 'right')
_UpperCAmelCase = encode_line(A , A , self.max_target_length , 'right')
_UpperCAmelCase = source_inputs['input_ids'].squeeze()
_UpperCAmelCase = target_inputs['input_ids'].squeeze()
_UpperCAmelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _lowerCamelCase ( A : str) -> Tuple:
"""simple docstring"""
return [len(A) for x in Path(A).open().readlines()]
def _lowerCamelCase ( self : int , A : int) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = torch.stack([x['input_ids'] for x in batch])
_UpperCAmelCase = torch.stack([x['attention_mask'] for x in batch])
_UpperCAmelCase = torch.stack([x['decoder_input_ids'] for x in batch])
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(A , A)
_UpperCAmelCase , _UpperCAmelCase = trim_batch(A , A , attention_mask=A)
_UpperCAmelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
UpperCAmelCase__ = getLogger(__name__)
def A ( _UpperCAmelCase : List[List] ) -> Union[str, Any]:
'''simple docstring'''
return list(itertools.chain.from_iterable(_UpperCAmelCase ) )
def A ( _UpperCAmelCase : str ) -> None:
'''simple docstring'''
_UpperCAmelCase = get_git_info()
save_json(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'git_log.json' ) )
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]=4 , **_UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
with open(_UpperCAmelCase , 'w' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase , indent=_UpperCAmelCase , **_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
with open(_UpperCAmelCase ) as f:
return json.load(_UpperCAmelCase )
def A ( ) -> str:
'''simple docstring'''
_UpperCAmelCase = git.Repo(search_parent_directories=_UpperCAmelCase )
_UpperCAmelCase = {
'repo_id': str(_UpperCAmelCase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def A ( _UpperCAmelCase : Callable , _UpperCAmelCase : Iterable ) -> List:
'''simple docstring'''
return list(map(_UpperCAmelCase , _UpperCAmelCase ) )
def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'wb' ) as f:
return pickle.dump(_UpperCAmelCase , _UpperCAmelCase )
def A ( _UpperCAmelCase : int ) -> str:
'''simple docstring'''
def remove_articles(_UpperCAmelCase : Optional[int] ):
return re.sub(R'\b(a|an|the)\b' , ' ' , _UpperCAmelCase )
def white_space_fix(_UpperCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(_UpperCAmelCase : Tuple ):
_UpperCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_UpperCAmelCase : str ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase ) ) ) )
def f1_score(prediction: str, ground_truth: str) -> float:
    '''Token-level F1 between a prediction and a ground-truth answer.'''
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
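# Worked example (illustrative): with ground truth 'The cat sat.' and prediction
# 'cat sat down', normalize_answer drops articles/punctuation and lower-cases,
# leaving ['cat', 'sat'] vs ['cat', 'sat', 'down']; precision = 2/3, recall = 2/2,
# so the token-level F1 is 0.8.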
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
return normalize_answer(_UpperCAmelCase ) == normalize_answer(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ) -> Dict:
'''simple docstring'''
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = 0
for hypo, pred in zip(_UpperCAmelCase , _UpperCAmelCase ):
em += exact_match_score(_UpperCAmelCase , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
em /= len(_UpperCAmelCase )
return {"em": em}
def A ( _UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
return model_prefix.startswith('rag' )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
for p in extra_params:
if getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if not hasattr(_UpperCAmelCase , _UpperCAmelCase ) and not hasattr(_UpperCAmelCase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_UpperCAmelCase ) )
delattr(_UpperCAmelCase , _UpperCAmelCase )
continue
_UpperCAmelCase = p if hasattr(_UpperCAmelCase , _UpperCAmelCase ) else equivalent_param[p]
setattr(_UpperCAmelCase , _UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
delattr(_UpperCAmelCase , _UpperCAmelCase )
return hparams, config
import doctest
from collections import deque
import numpy as np
class __lowerCAmelCase :
def __init__( self : Tuple) -> None:
"""simple docstring"""
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
def _lowerCamelCase ( self : str) -> list[float]:
"""simple docstring"""
_UpperCAmelCase = len(self.first_signal)
_UpperCAmelCase = len(self.second_signal)
_UpperCAmelCase = max(A , A)
# create a zero matrix of max_length x max_length
_UpperCAmelCase = [[0] * max_length for i in range(A)]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(A):
_UpperCAmelCase = deque(self.second_signal)
rotated_signal.rotate(A)
for j, item in enumerate(A):
matrix[i][j] += item
# multiply the matrix with the first signal
_UpperCAmelCase = np.matmul(np.transpose(A) , np.transpose(self.first_signal))
# rounding-off to two decimal places
return [round(A , 2) for i in final_signal]
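# Cross-check sketch (not part of the original class): circular convolution is
# element-wise multiplication in the DFT domain, so numpy's FFT reproduces the
# matrix-based result above.
def circular_convolution_fft(first_signal: list, second_signal: list) -> list:
    """Circularly convolve two sequences after zero-padding to a common length."""
    n = max(len(first_signal), len(second_signal))
    x = np.fft.fft(np.pad(first_signal, (0, n - len(first_signal))))
    y = np.fft.fft(np.pad(second_signal, (0, n - len(second_signal))))
    return [round(float(v), 2) for v in np.real(np.fft.ifft(x * y))]
# circular_convolution_fft([2, 1, 2, -1], [1, 2, 3, 4]) -> [10.0, 10.0, 6.0, 14.0]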
if __name__ == "__main__":
doctest.testmod()
def add(first: int, second: int) -> int:
    '''Add two non-negative integers using only bitwise operations.'''
    while second != 0:
        c = first & second  # carry bits
        first ^= second  # carry-less sum
        second = c << 1  # shift the carry into position
    return first
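# How the loop works (illustrative trace): `first & second` collects the carry bits,
# `first ^ second` is the carry-less sum, and the carry is shifted left and re-added
# until it dies out. For add(5, 3):
#     (first, second): (5, 3) -> (6, 2) -> (4, 4) -> (0, 8) -> (8, 0), returning 8.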
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("Enter the first number: ").strip())
UpperCAmelCase__ = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
from math import factorial
def solution(n: int = 20) -> int:
    '''Count the lattice paths through an n x n grid: the central binomial coefficient C(2n, n).'''
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
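# Sanity check (illustrative): solution(4) = 8! / (4! * 4!) = 70, the number of
# monotone lattice paths through a 4 x 4 grid.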
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
UpperCAmelCase__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A ( _UpperCAmelCase : str , _UpperCAmelCase : complex , _UpperCAmelCase : str = "x" , _UpperCAmelCase : float = 10**-10 , _UpperCAmelCase : int = 1 , ) -> complex:
'''simple docstring'''
_UpperCAmelCase = symbols(_UpperCAmelCase )
_UpperCAmelCase = lambdify(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = lambdify(_UpperCAmelCase , diff(_UpperCAmelCase , _UpperCAmelCase ) )
_UpperCAmelCase = starting_point
while True:
if diff_function(_UpperCAmelCase ) != 0:
_UpperCAmelCase = prev_guess - multiplicity * func(_UpperCAmelCase ) / diff_function(
_UpperCAmelCase )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_UpperCAmelCase = next_guess
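# Note (added): the update inside the loop is the modified Newton-Raphson step
#     x_{k+1} = x_k - m * f(x_k) / f'(x_k)
# where m is the known multiplicity of the root; m = 1 recovers the classic
# method, and m > 1 restores quadratic convergence at repeated roots.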
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any] , A : Any) -> str:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss']):
_UpperCAmelCase = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(A)
def _lowerCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A)
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self : Tuple) -> int:
"""simple docstring"""
_UpperCAmelCase = 'sgugger/tiny-distilbert-classification'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , only_pretrain_model=A , )
_UpperCAmelCase = PyTorchBenchmark(A)
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , torchscript=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A)
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision')
def _lowerCamelCase ( self : Any) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , fpaa=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A)
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = AutoConfig.from_pretrained(A)
# set architectures equal to `None`
_UpperCAmelCase = None
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A , configs=[config])
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self : Tuple) -> int:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A)
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision')
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A)
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = AutoConfig.from_pretrained(A)
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A , configs=[config])
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tinier_bart'
_UpperCAmelCase = AutoConfig.from_pretrained(A)
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A , configs=[config])
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = AutoConfig.from_pretrained(A)
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A , configs=[config])
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tinier_bart'
_UpperCAmelCase = AutoConfig.from_pretrained(A)
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A , configs=[config])
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , save_to_csv=A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A , 'inf_time.csv') , train_memory_csv_file=os.path.join(A , 'train_mem.csv') , inference_memory_csv_file=os.path.join(A , 'inf_mem.csv') , train_time_csv_file=os.path.join(A , 'train_time.csv') , env_info_csv_file=os.path.join(A , 'env.csv') , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A)
benchmark.run()
self.assertTrue(Path(os.path.join(A , 'inf_time.csv')).exists())
self.assertTrue(Path(os.path.join(A , 'train_time.csv')).exists())
self.assertTrue(Path(os.path.join(A , 'inf_mem.csv')).exists())
self.assertTrue(Path(os.path.join(A , 'train_mem.csv')).exists())
self.assertTrue(Path(os.path.join(A , 'env.csv')).exists())
def _lowerCamelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(A : List[Any]):
self.assertTrue(hasattr(A , 'sequential'))
self.assertTrue(hasattr(A , 'cumulative'))
self.assertTrue(hasattr(A , 'current'))
self.assertTrue(hasattr(A , 'total'))
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A , 'log.txt') , log_print=A , trace_memory_line_by_line=A , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A)
_UpperCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(A , 'log.txt')).exists())
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : Any , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_UpperCAmelCase = get_resize_output_image_size(A , size=size['shortest_edge'] , default_to_square=A)
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : int , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Union[str, Any] , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : int , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=A , size=A , resample=A) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=A , size=A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=A , mean=A , std=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
def _lowerCamelCase ( self : str , A : Any , A : List[Tuple] = None) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A) != len(A):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(A):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(A)):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(A)
else:
_UpperCAmelCase = logits.argmax(dim=1)
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
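# Usage sketch (added; the method names above are obfuscated, so this assumes
# the upstream names preprocess/post_process_semantic_segmentation):
#     processor = <the image processor class defined above>()
#     batch = processor.preprocess(images=image, return_tensors="pt")
#     batch["pixel_values"].shape  # -> (1, 3, 224, 224) with the default crop_size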
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = None
UpperCamelCase = BloomTokenizerFast
UpperCamelCase = BloomTokenizerFast
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = '''tokenizer_file'''
UpperCamelCase = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def _lowerCamelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
super().setUp()
_UpperCAmelCase = BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
tokenizer.save_pretrained(self.tmpdirname)
def _lowerCamelCase ( self : Union[str, Any] , **A : int) -> Dict:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : str) -> int:
"""simple docstring"""
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
_UpperCAmelCase = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
_UpperCAmelCase = tokenizer.batch_encode_plus(A)['input_ids']
self.assertListEqual(A , A)
_UpperCAmelCase = tokenizer.batch_decode(A)
self.assertListEqual(A , A)
def _lowerCamelCase ( self : int , A : Dict=6) -> Optional[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A , **A)
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
_UpperCAmelCase = 'This is a simple input'
_UpperCAmelCase = ['This is a simple input 1', 'This is a simple input 2']
_UpperCAmelCase = ('This is a simple input', 'This is a pair')
_UpperCAmelCase = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(A , max_length=A)
tokenizer_r.encode_plus(A , max_length=A)
tokenizer_r.batch_encode_plus(A , max_length=A)
tokenizer_r.encode(A , max_length=A)
tokenizer_r.batch_encode_plus(A , max_length=A)
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding')
_UpperCAmelCase = None # Hotfixing padding = None
self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='max_length')
# Simple input
self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='max_length')
# Simple input
self.assertRaises(
A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='max_length' , )
# Pair input
self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='max_length')
# Pair input
self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='max_length')
# Pair input
self.assertRaises(
A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='max_length' , )
def _lowerCamelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = load_dataset('xnli' , 'all_languages' , split='test' , streaming=A)
_UpperCAmelCase = next(iter(A))['premise'] # pick up one data
_UpperCAmelCase = list(sample_data.values())
_UpperCAmelCase = list(map(tokenizer.encode , A))
_UpperCAmelCase = [tokenizer.decode(A , clean_up_tokenization_spaces=A) for x in output_tokens]
self.assertListEqual(A , A)
def _lowerCamelCase ( self : Tuple) -> int:
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
import unittest
from knapsack import knapsack as k
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = [0]
_UpperCAmelCase = [0]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
_UpperCAmelCase = [60]
_UpperCAmelCase = [10]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = 3
_UpperCAmelCase = [1, 2, 3]
_UpperCAmelCase = [3, 2, 1]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 5)
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 50
_UpperCAmelCase = [60, 1_00, 1_20]
_UpperCAmelCase = [10, 20, 30]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 2_20)
if __name__ == "__main__":
unittest.main()
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] ) -> List[str]:
'''simple docstring'''
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
if tokenizer_name is None:
_UpperCAmelCase = TOKENIZER_CLASSES
else:
_UpperCAmelCase = {tokenizer_name: getattr(_UpperCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
for tokenizer_name in tokenizer_names:
_UpperCAmelCase = TOKENIZER_CLASSES[tokenizer_name]
_UpperCAmelCase = True
if checkpoint_name is None:
_UpperCAmelCase = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_UpperCAmelCase = [checkpoint_name]
logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
for checkpoint in checkpoint_names:
logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
# Load tokenizer
_UpperCAmelCase = tokenizer_class.from_pretrained(_UpperCAmelCase , force_download=_UpperCAmelCase )
# Save fast tokenizer
logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
# For organization names we create sub-directories
if "/" in checkpoint:
_UpperCAmelCase , _UpperCAmelCase = checkpoint.split('/' )
_UpperCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
elif add_prefix:
_UpperCAmelCase = checkpoint
_UpperCAmelCase = dump_path
else:
_UpperCAmelCase = None
_UpperCAmelCase = dump_path
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_UpperCAmelCase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_UpperCAmelCase = file_path.split(_UpperCAmelCase )[-1][0]
if next_char == "/":
_UpperCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = None
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
_UpperCAmelCase = tokenizer.save_pretrained(
_UpperCAmelCase , legacy_format=_UpperCAmelCase , filename_prefix=_UpperCAmelCase )
logger.info(F"=> File names {file_names}" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(_UpperCAmelCase )
logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
UpperCAmelCase__ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
import qiskit
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> qiskit.result.counts.Counts:
'''simple docstring'''
_UpperCAmelCase = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_UpperCAmelCase = qiskit.QuantumCircuit(_UpperCAmelCase , _UpperCAmelCase )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
_UpperCAmelCase = qiskit.execute(_UpperCAmelCase , _UpperCAmelCase , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_UpperCAmelCase )
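# Expected behaviour (added note): with X applied to both qubits, every shot
# collapses to |11>, so the counts returned for the 1_000 shots above are
# {'11': 1000}.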
if __name__ == "__main__":
UpperCAmelCase__ = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
UpperCAmelCase__ = logging.get_logger(__name__)
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = set()
_UpperCAmelCase = []
def parse_line(_UpperCAmelCase : Tuple ):
for line in fp:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
if len(_UpperCAmelCase ) > 0:
_UpperCAmelCase = '\n'.join(_UpperCAmelCase )
# Only keep the warnings specified in `targets`
if any(F": {x}: " in warning for x in targets ):
selected_warnings.add(_UpperCAmelCase )
buffer.clear()
continue
else:
_UpperCAmelCase = line.strip()
buffer.append(_UpperCAmelCase )
if from_gh:
for filename in os.listdir(_UpperCAmelCase ):
_UpperCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if not os.path.isdir(_UpperCAmelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(_UpperCAmelCase ) as fp:
parse_line(_UpperCAmelCase )
else:
try:
with zipfile.ZipFile(_UpperCAmelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(_UpperCAmelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(_UpperCAmelCase ) as fp:
parse_line(_UpperCAmelCase )
except Exception:
logger.warning(
F"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." )
return selected_warnings
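# (Added note) A buffered warning block is kept only when some target name
# appears in the form ": <Target>: ", e.g. a pytest line such as
#     ".../modeling_utils.py:88: FutureWarning: this argument is deprecated"
# would match the target "FutureWarning". The path and message here are
# illustrative, not taken from a real log.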
def A ( _UpperCAmelCase : int , _UpperCAmelCase : str ) -> int:
'''simple docstring'''
_UpperCAmelCase = set()
_UpperCAmelCase = [os.path.join(_UpperCAmelCase , _UpperCAmelCase ) for p in os.listdir(_UpperCAmelCase ) if (p.endswith('.zip' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(_UpperCAmelCase , _UpperCAmelCase ) )
return selected_warnings
if __name__ == "__main__":
def A ( _UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return values.split(',' )
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
# optional parameters
parser.add_argument(
"--targets",
default="DeprecationWarning,UserWarning,FutureWarning",
type=list_str,
help="Comma-separated list of target warning(s) which we want to extract.",
)
parser.add_argument(
"--from_gh",
action="store_true",
help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
)
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
UpperCAmelCase__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("=" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
UpperCAmelCase__ = extract_warnings(args.output_dir, args.targets)
UpperCAmelCase__ = sorted(selected_warnings)
with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
# Initialise PyTorch model
_UpperCAmelCase = TaConfig.from_json_file(_UpperCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
_UpperCAmelCase = TaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
from __future__ import annotations
class __lowerCAmelCase :
def __init__( self : List[Any] , A : int) -> None:
"""simple docstring"""
_UpperCAmelCase = order
# a_{0} ... a_{k}
_UpperCAmelCase = [1.0] + [0.0] * order
# b_{0} ... b_{k}
_UpperCAmelCase = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
_UpperCAmelCase = [0.0] * self.order
# y[n-1] ... y[n-k]
_UpperCAmelCase = [0.0] * self.order
def _lowerCamelCase ( self : Any , A : list[float] , A : list[float]) -> None:
"""simple docstring"""
if len(A) < self.order:
_UpperCAmelCase = [1.0, *a_coeffs]
if len(A) != self.order + 1:
_UpperCAmelCase = (
F"Expected a_coeffs to have {self.order + 1} elements "
F"for {self.order}-order filter, got {len(A)}"
)
raise ValueError(A)
if len(A) != self.order + 1:
_UpperCAmelCase = (
F"Expected b_coeffs to have {self.order + 1} elements "
F"for {self.order}-order filter, got {len(A)}"
)
raise ValueError(A)
_UpperCAmelCase = a_coeffs
_UpperCAmelCase = b_coeffs
def _lowerCamelCase ( self : Dict , A : float) -> float:
"""simple docstring"""
_UpperCAmelCase = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
_UpperCAmelCase = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
_UpperCAmelCase = self.input_history[:-1]
_UpperCAmelCase = self.output_history[:-1]
_UpperCAmelCase = sample
_UpperCAmelCase = result
return result
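# Minimal usage sketch (added; both methods above are obfuscated to the same
# name, so this assumes the upstream names IIRFilter/set_coefficients/process).
# A first-order low-pass filter y[n] = 0.5*x[n] + 0.5*y[n-1]:
#     filt = IIRFilter(1)
#     filt.set_coefficients([1.0, -0.5], [0.5, 0.0])
#     [round(filt.process(1.0), 3) for _ in range(4)]
#     # -> [0.5, 0.75, 0.875, 0.938], the step response converging to 1.0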
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = IFInpaintingPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self : Any , A : int , A : Dict=0) -> Tuple:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __lowerCAmelCase ( A , A ):
UpperCamelCase = 1
@register_to_config
def __init__( self : Optional[int] , A : int = 10_00 , A : Optional[Union[np.ndarray, List[float]]] = None) -> Optional[int]:
"""simple docstring"""
self.set_timesteps(A)
# standard deviation of the initial noise distribution
_UpperCAmelCase = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
_UpperCAmelCase = 4
# running values
_UpperCAmelCase = []
def _lowerCamelCase ( self : Union[str, Any] , A : int , A : Union[str, torch.device] = None) -> int:
"""simple docstring"""
_UpperCAmelCase = num_inference_steps
_UpperCAmelCase = torch.linspace(1 , 0 , num_inference_steps + 1)[:-1]
_UpperCAmelCase = torch.cat([steps, torch.tensor([0.0])])
if self.config.trained_betas is not None:
_UpperCAmelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa)
else:
_UpperCAmelCase = torch.sin(steps * math.pi / 2) ** 2
_UpperCAmelCase = (1.0 - self.betas**2) ** 0.5
_UpperCAmelCase = (torch.atana(self.betas , self.alphas) / math.pi * 2)[:-1]
_UpperCAmelCase = timesteps.to(A)
_UpperCAmelCase = []
def _lowerCamelCase ( self : Dict , A : torch.FloatTensor , A : int , A : torch.FloatTensor , A : bool = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler')
_UpperCAmelCase = (self.timesteps == timestep).nonzero().item()
_UpperCAmelCase = timestep_index + 1
_UpperCAmelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(A)
if len(self.ets) == 1:
_UpperCAmelCase = self.ets[-1]
elif len(self.ets) == 2:
_UpperCAmelCase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets) == 3:
_UpperCAmelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
_UpperCAmelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
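        # (Added note) The branches above are the explicit Adams-Bashforth
        # predictors of orders 1 to 4: once four past model outputs are
        # buffered, the 4th-order rule (55, -59, 37, -9) / 24 from
        # Algorithm 2 of the F-PNDM paper is used for every later step.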
_UpperCAmelCase = self._get_prev_sample(A , A , A , A)
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A)
def _lowerCamelCase ( self : Union[str, Any] , A : torch.FloatTensor , *A : Union[str, Any] , **A : Any) -> torch.FloatTensor:
"""simple docstring"""
return sample
def _lowerCamelCase ( self : List[str] , A : Dict , A : int , A : List[str] , A : Optional[int]) -> int:
"""simple docstring"""
_UpperCAmelCase = self.alphas[timestep_index]
_UpperCAmelCase = self.betas[timestep_index]
_UpperCAmelCase = self.alphas[prev_timestep_index]
_UpperCAmelCase = self.betas[prev_timestep_index]
_UpperCAmelCase = (sample - sigma * ets) / max(A , 1E-8)
_UpperCAmelCase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : Tuple) -> List[Any]:
"""simple docstring"""
return self.config.num_train_timesteps
import os
# Precomputes a list of the 100 first triangular numbers
UpperCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def A ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = os.path.dirname(os.path.realpath(_UpperCAmelCase ) )
_UpperCAmelCase = os.path.join(_UpperCAmelCase , 'words.txt' )
_UpperCAmelCase = ''
with open(_UpperCAmelCase ) as f:
_UpperCAmelCase = f.readline()
_UpperCAmelCase = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
    _UpperCAmelCase = [
        word
        for word in [sum(ord(x) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
return len(_UpperCAmelCase )
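# Example (added): "SKY" scores 19 + 11 + 25 = 55 = t(10), so it is a
# triangular word and contributes to the count returned by solution().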
if __name__ == "__main__":
print(solution())
from __future__ import annotations
import queue
class __lowerCAmelCase :
def __init__( self : Tuple , A : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = data
_UpperCAmelCase = None
_UpperCAmelCase = None
def A ( ) -> TreeNode:
'''simple docstring'''
print('\n********Press N to stop entering at any point of time********\n' )
_UpperCAmelCase = input('Enter the value of the root node: ' ).strip().lower()
_UpperCAmelCase = queue.Queue()
_UpperCAmelCase = TreeNode(int(_UpperCAmelCase ) )
q.put(_UpperCAmelCase )
while not q.empty():
_UpperCAmelCase = q.get()
_UpperCAmelCase = F"Enter the left node of {node_found.data}: "
_UpperCAmelCase = input(_UpperCAmelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
_UpperCAmelCase = TreeNode(int(_UpperCAmelCase ) )
_UpperCAmelCase = left_node
q.put(_UpperCAmelCase )
_UpperCAmelCase = F"Enter the right node of {node_found.data}: "
_UpperCAmelCase = input(_UpperCAmelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
_UpperCAmelCase = TreeNode(int(_UpperCAmelCase ) )
_UpperCAmelCase = right_node
q.put(_UpperCAmelCase )
raise
def A ( _UpperCAmelCase : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def A ( _UpperCAmelCase : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def A ( _UpperCAmelCase : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def A ( _UpperCAmelCase : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not node:
return
_UpperCAmelCase = queue.Queue()
q.put(_UpperCAmelCase )
while not q.empty():
_UpperCAmelCase = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def A ( _UpperCAmelCase : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not node:
return
_UpperCAmelCase = queue.Queue()
q.put(_UpperCAmelCase )
while not q.empty():
_UpperCAmelCase = []
while not q.empty():
_UpperCAmelCase = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_UpperCAmelCase )
def A ( _UpperCAmelCase : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not node:
return
_UpperCAmelCase = []
_UpperCAmelCase = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(_UpperCAmelCase )
_UpperCAmelCase = n.left
# end of while means current node doesn't have left child
_UpperCAmelCase = stack.pop()
# start to traverse its right child
_UpperCAmelCase = n.right
def A ( _UpperCAmelCase : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not node:
return
_UpperCAmelCase = []
_UpperCAmelCase = node
while n or stack:
while n:
stack.append(_UpperCAmelCase )
_UpperCAmelCase = n.left
_UpperCAmelCase = stack.pop()
print(n.data , end=',' )
_UpperCAmelCase = n.right
def A ( _UpperCAmelCase : TreeNode ) -> None:
'''simple docstring'''
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not node:
return
_UpperCAmelCase , _UpperCAmelCase = [], []
_UpperCAmelCase = node
stacka.append(_UpperCAmelCase )
while stacka: # to find the reversed order of post order, store it in stack2
_UpperCAmelCase = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_UpperCAmelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def A ( _UpperCAmelCase : str = "" , _UpperCAmelCase : Optional[Any]=50 , _UpperCAmelCase : List[Any]="*" ) -> str:
'''simple docstring'''
if not s:
return "\n" + width * char
_UpperCAmelCase , _UpperCAmelCase = divmod(width - len(_UpperCAmelCase ) - 2 , 2 )
return F"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
UpperCAmelCase__ = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(_UpperCAmelCase ) , b_binary.zfill(_UpperCAmelCase ) ) )
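# Example values (added as a sketch):
#     A(25, 32) -> '0b111001'
#     A(37, 50) -> '0b010111'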
if __name__ == "__main__":
import doctest
doctest.testmod()
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = StableDiffusionSAGPipeline
UpperCamelCase = TEXT_TO_IMAGE_PARAMS
UpperCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase = False
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
_UpperCAmelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=A , set_alpha_to_one=A , )
torch.manual_seed(0)
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0)
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
_UpperCAmelCase = CLIPTextModel(A)
_UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
_UpperCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _lowerCamelCase ( self : Optional[int] , A : Tuple , A : List[str]=0) -> Tuple:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def _lowerCamelCase ( self : int) -> List[str]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
_UpperCAmelCase = sag_pipe.to(A)
sag_pipe.set_progress_bar_config(disable=A)
_UpperCAmelCase = '.'
_UpperCAmelCase = torch.manual_seed(0)
_UpperCAmelCase = sag_pipe(
[prompt] , generator=A , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np')
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
def _lowerCamelCase ( self : Tuple) -> Any:
"""simple docstring"""
_UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
_UpperCAmelCase = sag_pipe.to(A)
sag_pipe.set_progress_bar_config(disable=A)
_UpperCAmelCase = '.'
_UpperCAmelCase = torch.manual_seed(0)
_UpperCAmelCase = sag_pipe(
[prompt] , generator=A , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np')
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
def _lowerCamelCase ( self : List[Any]) -> int:
"""simple docstring"""
_UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
_UpperCAmelCase = sag_pipe.to(A)
sag_pipe.set_progress_bar_config(disable=A)
_UpperCAmelCase = '.'
_UpperCAmelCase = torch.manual_seed(0)
_UpperCAmelCase = sag_pipe(
[prompt] , width=7_68 , height=5_12 , generator=A , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
_UpperCAmelCase = output.images
assert image.shape == (1, 5_12, 7_68, 3)
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
_UpperCAmelCase = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
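# Quick examples (added): both implementations above agree, e.g.
#     "Momo"   -> True  (at most one character occurs an odd number of times)
#     "Mother" -> False (six characters occur an odd number of times)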
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
print('\nFor string = ' , _UpperCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
UpperCAmelCase__ = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
UpperCAmelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
import re
from filelock import FileLock
try:
import nltk
UpperCAmelCase__ = True
except (ImportError, ModuleNotFoundError):
UpperCAmelCase__ = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def A ( _UpperCAmelCase : str ) -> str:
'''simple docstring'''
    _UpperCAmelCase = re.sub('<n>' , '' , _UpperCAmelCase )  # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_UpperCAmelCase ) )
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A )} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
if self.train_file is not None:
_UpperCAmelCase = self.train_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
_UpperCAmelCase = self.validation_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as f:
_UpperCAmelCase = [json.loads(_UpperCAmelCase ) for line in f.read().splitlines() if (len(_UpperCAmelCase ) > 0 and not line.isspace())]
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = {c: dataset[c] for c in dataset.column_names}
_UpperCAmelCase = refs
return Dataset.from_dict(_UpperCAmelCase )
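# (Added note) Each JSON line in the ref file lists the subword positions that
# belong to the same Chinese whole word; in the upstream run_mlm_wwm.py script
# these refs are attached as a "chinese_ref" column so that
# DataCollatorForWholeWordMask can mask whole words rather than subtokens.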
def A ( ) -> Optional[Any]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            datasets['validation'] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
            datasets['train'] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.' )[-1]
        if extension == "txt":
            extension = 'text'
        datasets = load_dataset(extension , data_files=data_files )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.' )
        if model_args.config_overrides is not None:
            logger.info(F"Overriding config: {model_args.config_overrides}" )
            config.update_from_string(model_args.config_overrides )
            logger.info(F"New config: {config}" )
    tokenizer_kwargs = {
        'cache_dir': model_args.cache_dir,
        'use_fast': model_args.use_fast_tokenizer,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **tokenizer_kwargs )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **tokenizer_kwargs )
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        model = AutoModelForMaskedLM.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['train'].column_names
    else:
        column_names = datasets['validation'].column_names
    text_column_name = 'text' if 'text' in column_names else column_names[0]
    padding = 'max_length' if data_args.pad_to_max_length else False
    def tokenize_function(examples ):
        # Remove empty lines
        examples['text'] = [line for line in examples['text'] if len(line ) > 0 and not line.isspace()]
        return tokenizer(examples['text'] , padding=padding , truncation=True , max_length=data_args.max_seq_length )
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets['train'] = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        tokenized_datasets['validation'] = add_chinese_references(
            tokenized_datasets['validation'] , data_args.validation_ref_file )
    # If we have ref files, we need to keep the Trainer from removing these columns
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
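    # Note (our addition): whole-word masking selects whole words rather than sub-tokens,
    # so a word tokenized as ['hu', '##gging'] is masked (or left intact) as a single unit.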
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , )
# Training
if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir , 'train_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_train_file , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'] )
        results['perplexity'] = perplexity
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def _mp_fn(index ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__( self , parent , vocab_size=99 , batch_size=13 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=37 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.002 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config( self ):
        """simple docstring"""
        return T5Config.from_pretrained('google/umt5-base')
    def prepare_inputs_dict( self , config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
        """simple docstring"""
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config , input_ids , decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config( self ):
        """simple docstring"""
        return T5Config(
            vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config( self ):
        """simple docstring"""
        return T5Config(
            vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        """simple docstring"""
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids , decoder_input_ids=decoder_input_ids , attention_mask=attention_mask , decoder_attention_mask=decoder_attention_mask , )
        result = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past) , config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]) , 4)
    def create_and_check_decoder_model_past( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        """simple docstring"""
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids , use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids , use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
        output_from_no_past = model(next_input_ids)['last_hidden_state']
        output_from_past = model(next_tokens , past_key_values=past_key_values)['last_hidden_state']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3))
    def create_and_check_model_fp16_forward( self , config , input_dict , ):
        """simple docstring"""
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)['last_hidden_state']
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': UMT5ForConditionalGeneration,
            '''feature-extraction''': UMT5Model,
            '''summarization''': UMT5ForConditionalGeneration,
            '''text2text-generation''': UMT5ForConditionalGeneration,
            '''translation''': UMT5ForConditionalGeneration,
            '''question-answering''': UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = True
UpperCamelCase = True
# The small UMT5 model needs higher percentages for CPU/MP tests
UpperCamelCase = [0.8, 0.9]
    def setUp( self ):
        """simple docstring"""
        self.model_tester = UMT5ModelTester(self)
@unittest.skip('Test has a segmentation fault on torch 1.8.0')
    def test_export_to_onnx( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=True , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision')
    def test_model_fp16_forward( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_headmasking( self ):
        """simple docstring"""
        attention_names = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            'head_mask': torch.zeros(config.num_layers , config.num_heads , device=torch_device),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names , head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['decoder_head_mask'] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device)
            out = model.generate(
                config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]) , 0.0)
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.')
    def test_disk_offload( self ):
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged')
    def test_small_integration_test( self ):
        """simple docstring"""
        model = UMT5ForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=False , legacy=False )
        input_text = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
        input_ids = tokenizer(input_text , return_tensors='pt' , padding=True ).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
])
# fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling , EXPECTED_FILLING)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}
SPIECE_UNDERLINE = "▁"
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = '''left'''
    slow_tokenizer_class = XLNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , **kwargs , ):
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
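    # Illustrative (our note): XLNet appends its special tokens, so a single sequence
    # becomes `A <sep> <cls>` and a pair becomes `A <sep> B <sep> <cls>`, unlike
    # BERT-style models that prepend [CLS].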
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        return (out_vocab_file,)
import numpy as np
def runge_kutta ( f , ya , xa , x_end , h ):
    '''simple docstring'''
    n = int(np.ceil((x_end - xa) / h ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        ka = f(x , y[k] )
        kb = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
        kc = f(x + 0.5 * h , y[k] + 0.5 * h * kb )
        kd = f(x + h , y[k] + h * kc )
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
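    # Minimal sanity check (our addition, not part of the original file): integrating
    # y' = y with y(0) = 1 from x = 0 to x = 1 in steps of 0.01 should land near e.
    approx = runge_kutta(lambda x, y: y , 1.0 , 0.0 , 1.0 , 0.01 )
    print(F"RK4 estimate of e: {approx[-1]:.6f} (exact: {np.e:.6f})" )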
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["YolosFeatureExtractor"]
UpperCAmelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from scipy.stats import spearmanr
import datasets
UpperCAmelCase__ = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
UpperCAmelCase__ = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
UpperCAmelCase__ = r"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float'),
'references': datasets.Value('float'),
}) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        """simple docstring"""
        results = spearmanr(references , predictions )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")
def get_hash ( example ):
    '''simple docstring'''
    return {"hash": hashlib.md5(re.sub(PATTERN , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def line_stats ( example ):
    '''simple docstring'''
    line_lengths = [len(line ) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats ( example ):
    '''simple docstring'''
    alpha_frac = np.mean([c.isalnum() for c in example['content']] )
    return {"alpha_frac": alpha_frac}
def check_uniques ( example , uniques ):
    '''simple docstring'''
    if example["hash"] in uniques:
        uniques.remove(example['hash'] )
        return True
    else:
        return False
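# Note (our addition): `uniques` is mutated in place, so only the first example carrying a
# given hash passes the check and every later duplicate is rejected.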
def is_autogenerated ( example , scan_width=5 ):
    '''simple docstring'''
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test ( example , scan_width=5 , coeff=0.05 ):
    '''simple docstring'''
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n' )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count('config' )
        count_test += line.lower().count('test' )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords ( example ):
    '''simple docstring'''
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments ( example , minimum=4 ):
    '''simple docstring'''
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=' )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio ( example ):
    '''simple docstring'''
    input_ids = tokenizer(example['content'] , truncation=False )['input_ids']
    ratio = len(example['content'] ) / len(input_ids )
    return {"ratio": ratio}
def preprocess ( example ):
    '''simple docstring'''
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def filter ( example , uniques , args ):
    '''simple docstring'''
    if not check_uniques(example , uniques ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file ( file_path ):
    '''simple docstring'''
    with open(file_path , 'rb' ) as f_in:
        with gzip.open(str(file_path ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
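# Example (our addition): compress_file("data/file-000000000001.json") writes
# "data/file-000000000001.json.gz" with gzip level 6 and removes the uncompressed file.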
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicated dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
    dataset_config_name: Optional[str] = field(
        default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , )
    max_seq_length: int = field(
        default=1024 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
    pad_to_max_length: bool = field(
        default=False , metadata={
            '''help''': (
                '''Whether to pad all samples to `max_seq_length`. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    max_predict_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
                '''value if set.'''
            )
        } , )
    train_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A csv or a json file containing the training data.'''} )
    validation_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
    test_file: Optional[str] = field(default=None , metadata={'''help''': '''A csv or a json file containing the test data.'''} )
    def __post_init__( self ):
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
else:
            train_extension = self.train_file.split('.')[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('.')[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
def main():
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
        data_files = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split('.' )[-1]
                test_extension = data_args.test_file.split('.' )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files['test'] = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
            raw_datasets = load_dataset('csv' , data_files=data_files , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
            raw_datasets = load_dataset('json' , data_files=data_files , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets['train'].features['label'].names
    num_labels = len(label_list )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=True , )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {'Refused': 0, 'Entailed': 1}
    model.config.id2label = {0: 'Refused', 1: 'Entailed'}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
    max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
    def preprocess_tabfact_function(examples ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text ):
            _table_content = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
            _table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd
        questions = examples['statement']
        tables = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
        result = tokenizer(tables , questions , padding=padding , max_length=max_seq_length , truncation=True )
        result['label'] = examples['label']
        return result
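    # Sketch of the `table_text` layout this helper expects (our illustration):
    #   "col1#col2\ncell1#cell2\ncell3#cell4"
    # '#' separates cells, '\n' separates rows, and the first row holds the header.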
with training_args.main_process_first(desc='dataset map pre-processing' ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
        predict_dataset = raw_datasets['test']
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_UpperCAmelCase ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns('label' )
        predictions = trainer.predict(predict_dataset , metric_key_prefix='predict' ).predictions
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
            with open(output_predict_file , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F"{index}\t{item}\n" )
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key
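

# Hedged examples of what `replace_key` does (added for illustration; the keys
# are made up but follow the checkpoint naming handled above):
#   replace_key("vqvae.bottleneck.level_blocks.0.k")
#       -> "vqvae.bottleneck.level_blocks.0.codebook"
#   replace_key("priors.0.prime_state_ln.weight")
#       -> "priors.0.encoder.final_layer_norm.weight"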
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
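

# Hedged illustration of the index arithmetic in `fix_jukebox_keys` (added, not
# part of the original script): an encoder key
# "encoders.0.level_blocks.0.model.1.3.weight" yields groups
# ("0", "0", "1", "3", "weight"), so block_index = 1 * 2 + 3 = 5 and the key is
# rewritten to "encoders.0.level_blocks.0.downsample_block.5.weight".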
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]
    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
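
    # Example invocation (hedged sketch; the script name and output path are
    # hypothetical):
    #   python convert_jukebox.py --model_name jukebox-1b-lyrics \
    #       --pytorch_dump_folder_path ./jukebox-1b-lyrics-converted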
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@require_torch
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(A) , [
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@require_tf
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
self.assertEqual(
nested_simplify(A) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@slow
@require_torch
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _lowerCamelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = PLBartTokenizer(A , language_codes='base' , keep_accents=A)
_UpperCAmelCase = tokenizer.tokenize('This is a test')
self.assertListEqual(A , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertListEqual(
A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
_UpperCAmelCase = tokenizer.vocab_size
_UpperCAmelCase = [tokenizer.convert_ids_to_tokens(A) for x in range(end - 4 , A)]
self.assertListEqual(A , ['__java__', '__python__', '__en_XX__', '<mask>'])
_UpperCAmelCase = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
_UpperCAmelCase = tokenizer(A).input_ids
self.assertEqual(
tokenizer.decode(A , skip_special_tokens=A , clean_up_tokenization_spaces=A) , A , )
def _lowerCamelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = PLBartTokenizer(A , language_codes='multi' , keep_accents=A)
_UpperCAmelCase = tokenizer.tokenize('This is a test')
self.assertListEqual(A , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertListEqual(
A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
_UpperCAmelCase = tokenizer.vocab_size
_UpperCAmelCase = [tokenizer.convert_ids_to_tokens(A) for x in range(end - 7 , A)]
self.assertListEqual(
A , ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'])
_UpperCAmelCase = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
_UpperCAmelCase = tokenizer(A).input_ids
self.assertEqual(
tokenizer.decode(A , skip_special_tokens=A , clean_up_tokenization_spaces=A) , A , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134,
        5452,
        33460,
        33441,
        33463,
        33465,
        33463,
        33449,
        988,
        20,
        33456,
        19,
        33456,
        771,
        39,
        4258,
        889,
        3318,
        33441,
        33463,
        33465,
        33463,
        33449,
        2471,
        2,
        PYTHON_CODE,
    ]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls
def _lowerCamelCase ( self : List[Any]) -> int:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 5_00_01)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 5_00_02)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 5_00_03)
def _lowerCamelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A)
def _lowerCamelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
self.assertIn(A , self.tokenizer.all_special_ids)
_UpperCAmelCase = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
_UpperCAmelCase = self.tokenizer.decode(A , skip_special_tokens=A)
_UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A)
self.assertEqual(A , A)
self.assertNotIn(self.tokenizer.eos_token , A)
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 20]
self.assertIsInstance(src_text[0] , A)
_UpperCAmelCase = 10
_UpperCAmelCase = self.tokenizer(A , max_length=A , truncation=A).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , A)
self.assertEqual(len(A) , A)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__']) , [5_00_04, 5_00_01])
def _lowerCamelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A)
_UpperCAmelCase = PLBartTokenizer.from_pretrained(A)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A)
@require_torch
def _lowerCamelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors='pt')
_UpperCAmelCase = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE])
self.assertEqual(batch.decoder_input_ids[1][0] , A)
self.assertEqual(batch.decoder_input_ids[1][-1] , 2)
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE])
@require_torch
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens) , return_tensors='pt' , )
_UpperCAmelCase = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id)
self.assertIsInstance(A , A)
self.assertEqual((2, 26) , batch.input_ids.shape)
self.assertEqual((2, 26) , batch.attention_mask.shape)
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE])
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors='pt')
_UpperCAmelCase = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors='pt')
_UpperCAmelCase = targets['input_ids']
_UpperCAmelCase = shift_tokens_right(A , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def _lowerCamelCase ( self : Dict) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='java')
self.assertEqual(
nested_simplify(A) , {
# A, test, EOS, en_XX
'input_ids': [[1_50, 2_42, 2, 5_00_03]],
'attention_mask': [[1, 1, 1, 1]],
# java
'forced_bos_token_id': 5_00_01,
} , )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
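
    # Hedged worked example for the branch above (illustration only): with
    # size={"shortest_edge": 224} and the default crop_pct of 224 / 256, the
    # shortest edge is first resized to int(224 / (224 / 256)) = 256 and the
    # image is then center-cropped to (224, 224); at 384 or larger the image is
    # warped straight to (shortest_edge, shortest_edge) with no crop.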
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding"
            " to this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
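

# Hedged usage sketch (added for illustration): with metric="bleu" the helpers
# above monitor `val_bleu` and keep only the single best checkpoint.
#   checkpoint = get_checkpoint_callback("/tmp/rag_output", "bleu")
#   early_stop = get_early_stopping_callback("bleu", patience=3)
#   trainer = pl.Trainer(callbacks=[checkpoint, early_stop, Seq2SeqLoggingCallback()])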
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
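
    # Hedged sanity check (added for illustration): with the defaults above
    # (input_size=1, a lags_sequence of length 7, embedding_dimension summing to
    # 0 and no extra real/time features), _number_of_features is 1 * 2 = 2, so
    # feature_size = 1 * 7 + 2 = 9.
    #   config = AutoformerConfig(prediction_length=24)
    #   assert config.feature_size == 9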
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers(do_lower_case=A)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token})
_UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=A)
self.assertEqual(len(A) , 1)
_UpperCAmelCase = tokenizer.decode(A , skip_special_tokens=A)
self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(A)
_UpperCAmelCase = tokenizer.tokenize(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
_UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertNotEqual(len(A) , 0)
_UpperCAmelCase = tokenizer.decode(A)
self.assertIsInstance(A , A)
self.assertEqual(text_a.replace(' ' , '') , A)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
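

if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of the original
    # module): exercise the helpers above with a made-up environment variable.
    os.environ["MY_HYPOTHETICAL_FLAG"] = "true"
    assert parse_flag_from_env("MY_HYPOTHETICAL_FLAG") is True
    assert get_int_from_env(["UNSET_VAR_A", "UNSET_VAR_B"], 7) == 7
    assert parse_choice_from_env("UNSET_CHOICE", "no") == "no"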
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
parser.add_argument(
'--validation_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=_UpperCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCAmelCase , )
parser.add_argument(
'--config_name' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=_UpperCAmelCase , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Where to store the final ONNX file.' )
    args = parser.parse_args()

    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
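
    # Example invocation (hedged sketch; the script name is hypothetical, the
    # flags match the argparse definitions above):
    #   python run_bart_onnx_export.py --model_name_or_path facebook/bart-base \
    #       --num_beams 4 --max_length 5 --device cpu --output_file_path BART.onnx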
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of num must be multiplied together before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of num must be summed before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
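
    # Hedged worked examples (added for illustration): 39 -> 3*9=27 -> 2*7=14
    # -> 1*4=4 takes three multiplicative steps, while 39 -> 3+9=12 -> 1+2=3
    # takes two additive steps.
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(39) == 2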
def gnome_sort(lst: list) -> list:
    """Sort a list in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
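
    # Hedged trace of the algorithm above (illustration only): on [3, 1, 2] the
    # gnome swaps 3 and 1, steps back to index 0, walks forward, swaps 3 and 2,
    # steps back once more, and finishes with [1, 2, 3].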
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )


@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
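

# Hedged illustration (added, not part of the original script): each non-empty
# line of the ref file is assumed to be a JSON list such as "[2, 3]", marking
# sub-token positions that continue a whole word; it is attached to the dataset
# as an extra "chinese_ref" column for DataCollatorForWholeWordMask to consume.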
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
            datasets['validation'] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
            datasets['train'] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.' )[-1]
        if extension == "txt":
            extension = 'text'
        datasets = load_dataset(extension , data_files=data_files )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.' )
    if model_args.config_overrides is not None:
        logger.info(F"Overriding config: {model_args.config_overrides}" )
        config.update_from_string(model_args.config_overrides )
        logger.info(F"New config: {config}" )
    tokenizer_kwargs = {
        'cache_dir': model_args.cache_dir,
        'use_fast': model_args.use_fast_tokenizer,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **tokenizer_kwargs )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **tokenizer_kwargs )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        model = AutoModelForMaskedLM.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['train'].column_names
    else:
        column_names = datasets['validation'].column_names
    text_column_name = 'text' if 'text' in column_names else column_names[0]
    padding = 'max_length' if data_args.pad_to_max_length else False
    def tokenize_function(examples : str ):
        # Remove empty lines
        examples['text'] = [line for line in examples['text'] if len(line ) > 0 and not line.isspace()]
        return tokenizer(examples['text'] , padding=padding , truncation=True , max_length=data_args.max_seq_length )
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets['train'] = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        tokenized_datasets['validation'] = add_chinese_references(
            tokenized_datasets['validation'] , data_args.validation_ref_file )
    # If we have ref files, we need to prevent the Trainer from removing these columns
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
# Data collator
# This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model() # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir , 'train_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_train_file , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'] )
        results['perplexity'] = perplexity
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def _mp_fn( index : str ) -> Optional[Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
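# A minimal invocation sketch for this whole-word-masking MLM script, assuming
# it is saved as run_mlm_wwm.py; the model name and file paths are illustrative
# placeholders, and `--train_ref_file` is only needed for the Chinese
# whole-word-masking case handled by `add_chinese_references` above:
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt \
#       --train_ref_file train_ref.txt \
#       --do_train \
#       --output_dir ./output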
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line( tokenizer : List[str] , line : Optional[Any] , max_length : Union[str, Any] , padding_side : str , pad_to_max_length : Dict=True , return_tensors : Tuple="pt" ) -> List[Any]:
    '''simple docstring'''
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(' ' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids : Dict , pad_token_id : List[Any] , attention_mask : Dict=None , ) -> Tuple:
    '''simple docstring'''
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( A ):
def __init__( self : Union[str, Any] , A : Union[str, Any] , A : Optional[int] , A : str , A : Union[str, Any] , A : int="train" , A : List[Any]=None , A : int=None , A : Tuple=None , A : str="" , ) -> List[Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = Path(A).joinpath(type_path + '.source')
_UpperCAmelCase = Path(A).joinpath(type_path + '.target')
_UpperCAmelCase = self.get_char_lens(self.src_file)
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self : Tuple) -> Optional[int]:
"""simple docstring"""
return len(self.src_lens)
def __getitem__( self : Any , A : Dict) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file) , A).rstrip('\n')
_UpperCAmelCase = linecache.getline(str(self.tgt_file) , A).rstrip('\n')
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , A):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , A) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , A) else self.tokenizer
_UpperCAmelCase = encode_line(A , A , self.max_source_length , 'right')
_UpperCAmelCase = encode_line(A , A , self.max_target_length , 'right')
_UpperCAmelCase = source_inputs['input_ids'].squeeze()
_UpperCAmelCase = target_inputs['input_ids'].squeeze()
_UpperCAmelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _lowerCamelCase ( A : str) -> Tuple:
"""simple docstring"""
return [len(A) for x in Path(A).open().readlines()]
def _lowerCamelCase ( self : int , A : int) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = torch.stack([x['input_ids'] for x in batch])
_UpperCAmelCase = torch.stack([x['attention_mask'] for x in batch])
_UpperCAmelCase = torch.stack([x['decoder_input_ids'] for x in batch])
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(A , A)
_UpperCAmelCase , _UpperCAmelCase = trim_batch(A , A , attention_mask=A)
_UpperCAmelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
logger = getLogger(__name__)
def A ( _UpperCAmelCase : List[List] ) -> Union[str, Any]:
'''simple docstring'''
return list(itertools.chain.from_iterable(_UpperCAmelCase ) )
def A ( _UpperCAmelCase : str ) -> None:
'''simple docstring'''
_UpperCAmelCase = get_git_info()
save_json(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'git_log.json' ) )
def save_json( content : List[Any] , path : List[str] , indent : Optional[int]=4 , **json_dump_kwargs : Optional[Any] ) -> Dict:
    '''simple docstring'''
    with open(path , 'w' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def A ( _UpperCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
with open(_UpperCAmelCase ) as f:
        return json.load(f )
def get_git_info( ) -> str:
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos
def lmap( f : Callable , x : Iterable ) -> List:
    '''simple docstring'''
    return list(map(f , x ) )
def pickle_save( obj : Optional[int] , path : Union[str, Any] ) -> Union[str, Any]:
    '''simple docstring'''
    with open(path , 'wb' ) as f:
        return pickle.dump(obj , f )
def normalize_answer( _UpperCAmelCase : int ) -> str:
'''simple docstring'''
def remove_articles(_UpperCAmelCase : Optional[int] ):
return re.sub(R'\b(a|an|the)\b' , ' ' , _UpperCAmelCase )
    def white_space_fix(text : Optional[int] ):
        return " ".join(text.split() )
    def remove_punc(text : Tuple ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text : str ):
        return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase ) ) ) )
def f1_score( prediction : str , ground_truth : Any ) -> Union[str, Any]:
    '''simple docstring'''
    pred_tokens = normalize_answer(prediction ).split()
    gold_tokens = normalize_answer(ground_truth ).split()
    common = Counter(pred_tokens ) & Counter(gold_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_tokens )
    recall = 1.0 * num_same / len(gold_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
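# Worked example for the token-level F1 above: the gold answer "the cat sat"
# normalizes to ["cat", "sat"] and a prediction "a cat sat on mat" to
# ["cat", "sat", "on", "mat"]; the token overlap is 2, so precision = 2/4,
# recall = 2/2, and F1 = 2 * 0.5 * 1.0 / 1.5 = 2/3.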
def exact_match_score( prediction : Optional[Any] , ground_truth : str ) -> List[Any]:
    '''simple docstring'''
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns : List[str] , reference_lns : List[str] ) -> Dict:
    '''simple docstring'''
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model( model_prefix : Union[str, Any] ) -> int:
    '''simple docstring'''
    return model_prefix.startswith('rag' )
def set_extra_model_params( extra_params : str , hparams : Any , config : str ) -> Union[str, Any]:
    '''simple docstring'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
import math
import sys
import cv2
import numpy as np
def vec_gaussian( img : np.ndarray , variance : float ) -> np.ndarray:
    '''simple docstring'''
    # Apply the gaussian function to each element of the matrix.
    sigma = math.sqrt(variance )
    cons = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def get_slice( img : np.ndarray , x : int , y : int , kernel_size : int ) -> np.ndarray:
    '''simple docstring'''
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel( kernel_size : int , spatial_variance : float ) -> np.ndarray:
    '''simple docstring'''
    # Creates a gaussian kernel of the given dimension.
    arr = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , kernel_size ):
        for j in range(0 , kernel_size ):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(arr , spatial_variance )
def bilateral_filter( img : np.ndarray , spatial_variance : float , intensity_variance : float , kernel_size : int , ) -> np.ndarray:
    '''simple docstring'''
    imga = np.zeros(img.shape )
    gauss_ker = get_gauss_kernel(kernel_size , spatial_variance )
    size_x , size_y = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            img_s = get_slice(img , i , j , kernel_size )
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i , intensity_variance )
            weights = np.multiply(gauss_ker , img_ig )
            vals = np.multiply(img_s , weights )
            val = np.sum(vals ) / np.sum(weights )
            imga[i, j] = val
    return imga
def parse_args( args : list ) -> tuple:
    '''simple docstring'''
    filename = args[1] if args[1:] else '../image_data/lena.jpg'
    spatial_variance = float(args[2] ) if args[2:] else 1.0
    intensity_variance = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4] )
        kernel_size = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)
    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
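# Two notes on the filter above:
# * Each neighbor in the window is weighted by a Gaussian of its spatial
#   distance from the center times a Gaussian of its intensity difference
#   from the center pixel; the product is the edge-preserving bilateral weight.
# * The kernel-size adjustment `k + abs(k % 2 - 1)` in `parse_args` forces an
#   odd size, e.g. 4 -> 4 + |0 - 1| = 5 while 5 -> 5 + |1 - 1| = 5, so the
#   window always has a well-defined center pixel.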
def add( first : int , second : int ) -> int:
    '''simple docstring'''
    while second != 0:
        c = first & second  # carry: the common set bits of both operands
        first ^= second  # partial sum without the carry bits
        second = c << 1  # shift the carry so the next pass adds it in
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("Enter the first number: ").strip())
UpperCAmelCase__ = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser( subparsers : Dict=None ) -> Union[str, Any]:
    '''simple docstring'''
    parent_parser = argparse.ArgumentParser(add_help=False , allow_abbrev=False )
    # The main config parser
    config_parser = config_command_parser(subparsers )
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title='subcommands' , dest='subcommand' )
    # Then add other parsers with the parent parser
    default_command_parser(subcommands , parents=[parent_parser] )
    update_command_parser(subcommands , parents=[parent_parser] )
    return config_parser
def main( ) -> Optional[Any]:
    '''simple docstring'''
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args , 'func' ):
        config_parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
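# A minimal usage sketch; the subcommand names come from the parsers wired up
# in `get_config_parser` above, and the descriptions are illustrative:
#
#   accelerate config            # run the interactive configuration questionnaire
#   accelerate config default    # write a default config file non-interactively
#   accelerate config update     # rewrite an existing config file in place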
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson( function : str , starting_point : complex , variable : str = "x" , precision : float = 10**-10 , multiplicity : int = 1 , ) -> complex:
    '''simple docstring'''
    x = symbols(variable )
    func = lambdify(x , function )
    diff_function = lambdify(x , diff(function , x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError('Could not find root' ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
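# The update inside the loop is the multiplicity-aware Newton step
#   x_{n+1} = x_n - m * f(x_n) / f'(x_n),
# which restores quadratic convergence for a root of multiplicity m > 1 and
# reduces to the classic Newton-Raphson iteration for the default m = 1.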
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
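# With the lazy import structure above, `import transformers` stays cheap:
# accessing e.g. `TapasModel` is what first triggers the torch-dependent
# `modeling_tapas` import, while `TapasConfig` and `TapasTokenizer` remain
# importable without torch installed.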
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : Any , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_UpperCAmelCase = get_resize_output_image_size(A , size=size['shortest_edge'] , default_to_square=A)
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : int , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Union[str, Any] , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : int , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=A , size=A , resample=A) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=A , size=A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=A , mean=A , std=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
def _lowerCamelCase ( self : str , A : Any , A : List[Tuple] = None) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A) != len(A):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(A):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(A)):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(A)
else:
_UpperCAmelCase = logits.argmax(dim=1)
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
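# Pipeline order implemented by the preprocessing method above, each step
# gated by its `do_*` flag: resize the shortest edge to 256 -> center-crop to
# 224x224 -> rescale by 1/255 -> normalize with the ImageNet mean/std ->
# convert to channels-first. For example, a 480x640 input becomes roughly
# 256x341 after the resize step and 224x224 after the crop.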
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = IFInpaintingPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self : Any , A : int , A : Dict=0) -> Tuple:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
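    # Note on the generator branch above: device-local `torch.Generator`
    # objects are not supported on the MPS backend, so there the seed is
    # applied through the global `torch.manual_seed` instead.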
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
import unittest
from knapsack import knapsack as k
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = [0]
_UpperCAmelCase = [0]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
_UpperCAmelCase = [60]
_UpperCAmelCase = [10]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = 3
_UpperCAmelCase = [1, 2, 3]
_UpperCAmelCase = [3, 2, 1]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 5)
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 50
_UpperCAmelCase = [60, 1_00, 1_20]
_UpperCAmelCase = [10, 20, 30]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 2_20)
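        # Worked check for this case: with capacity 50, taking the items worth
        # 100 and 120 (weights 20 + 30 = 50) attains the optimum 220, while
        # adding the 60-value item would exceed the capacity.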
if __name__ == "__main__":
unittest.main()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import qiskit
def single_qubit_measure( qubits : int , classical_bits : int ) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1_000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __lowerCAmelCase ( A ):
def _lowerCamelCase ( self : int , A : str) -> str:
"""simple docstring"""
with open(A , encoding='utf-8') as input_file:
_UpperCAmelCase = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
_UpperCAmelCase = input_file.read()
_UpperCAmelCase = regexp.search(A)
return match
def _lowerCamelCase ( self : Optional[Any] , A : str) -> List[str]:
"""simple docstring"""
with open(A , encoding='utf-8') as input_file:
_UpperCAmelCase = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL)
_UpperCAmelCase = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_UpperCAmelCase = regexp.finditer(A)
_UpperCAmelCase = [match for match in matches if match is not None and match.group(1) is not None]
return matches[0] if matches else None
def _lowerCamelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = Path('./datasets')
_UpperCAmelCase = list(dataset_paths.absolute().glob('**/*.py'))
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(A)):
raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}")
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
_UpperCAmelCase = Path('./datasets')
_UpperCAmelCase = list(dataset_paths.absolute().glob('**/*.py'))
for dataset in dataset_files:
if self._no_print_statements(str(A)):
raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead.")
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path : str , config_file : str , pytorch_dump_path : str ) -> None:
    '''simple docstring'''
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = T5ForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
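# A minimal invocation sketch; the script file name and the paths are
# illustrative placeholders:
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model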
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
UpperCAmelCase__ = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
UpperCAmelCase__ = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
UpperCAmelCase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
UpperCAmelCase__ = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
UpperCAmelCase__ = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = DPRContextEncoderTokenizer
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = DPRQuestionEncoderTokenizer
UpperCAmelCase__ = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
UpperCAmelCase__ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
UpperCAmelCase__ = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(A )
class __lowerCAmelCase :
def __call__( self : List[str] , A : Any , A : Optional[str] = None , A : Optional[str] = None , A : Union[bool, str] = False , A : Union[bool, str] = False , A : Optional[int] = None , A : Optional[Union[str, TensorType]] = None , A : Optional[bool] = None , **A : List[Any] , ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
A , padding=A , truncation=A , max_length=A , return_tensors=A , return_attention_mask=A , **A , )
elif titles is None or texts is None:
_UpperCAmelCase = titles if texts is None else texts
return super().__call__(
A , A , padding=A , truncation=A , max_length=A , return_tensors=A , return_attention_mask=A , **A , )
_UpperCAmelCase = titles if not isinstance(A , A) else [titles]
_UpperCAmelCase = texts if not isinstance(A , A) else [texts]
_UpperCAmelCase = len(A)
_UpperCAmelCase = questions if not isinstance(A , A) else [questions] * n_passages
assert len(A) == len(
A), F"There should be as many titles than texts but got {len(A)} titles and {len(A)} texts."
_UpperCAmelCase = super().__call__(A , A , padding=A , truncation=A)['input_ids']
_UpperCAmelCase = super().__call__(A , add_special_tokens=A , padding=A , truncation=A)['input_ids']
_UpperCAmelCase = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(A , A)
]
}
if return_attention_mask is not False:
_UpperCAmelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
_UpperCAmelCase = attention_mask
return self.pad(A , padding=A , max_length=A , return_tensors=A)
def _lowerCamelCase ( self : Dict , A : BatchEncoding , A : DPRReaderOutput , A : int = 16 , A : int = 64 , A : int = 4 , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_UpperCAmelCase = reader_input['input_ids']
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = reader_output[:3]
_UpperCAmelCase = len(A)
_UpperCAmelCase = sorted(range(A) , reverse=A , key=relevance_logits.__getitem__)
_UpperCAmelCase = []
for doc_id in sorted_docs:
_UpperCAmelCase = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
_UpperCAmelCase = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_UpperCAmelCase = sequence_ids.index(self.pad_token_id)
else:
_UpperCAmelCase = len(A)
_UpperCAmelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=A , top_spans=A , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=A , start_index=A , end_index=A , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(A) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _lowerCamelCase ( self : Optional[Any] , A : List[int] , A : List[int] , A : int , A : int , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_UpperCAmelCase = []
for start_index, start_score in enumerate(A):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
_UpperCAmelCase = sorted(A , key=lambda A: x[1] , reverse=A)
_UpperCAmelCase = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"Wrong span indices: [{start_index}:{end_index}]"
_UpperCAmelCase = end_index - start_index + 1
assert length <= max_answer_length, F"Span is too long: {length} > {max_answer_length}"
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(A) == top_spans:
break
return chosen_span_intervals
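    # Span selection above, in brief: every (start, end) pair within
    # `max_answer_length` is scored as start_logit[start] + end_logit[end],
    # candidates are sorted by score, and a span is kept only if it does not
    # overlap an already-chosen span, until `top_spans` spans are collected.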
@add_end_docstrings(A )
class __lowerCAmelCase ( A , A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = ['''input_ids''', '''attention_mask''']
UpperCamelCase = DPRReaderTokenizer