code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = StableDiffusionSAGPipeline
UpperCAmelCase__ = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__ = False
def __lowercase( self ) -> Dict:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__UpperCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__UpperCamelCase = CLIPTextModel(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ) -> Any:
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__UpperCamelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __lowercase( self ) -> List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase( self ) -> Dict:
__UpperCamelCase = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
__UpperCamelCase = sag_pipe.to(_SCREAMING_SNAKE_CASE )
sag_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = '.'
__UpperCamelCase = torch.manual_seed(0 )
__UpperCamelCase = sag_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCamelCase = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __lowercase( self ) -> int:
__UpperCamelCase = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
__UpperCamelCase = sag_pipe.to(_SCREAMING_SNAKE_CASE )
sag_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = '.'
__UpperCamelCase = torch.manual_seed(0 )
__UpperCamelCase = sag_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCamelCase = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __lowercase( self ) -> List[Any]:
__UpperCamelCase = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
__UpperCamelCase = sag_pipe.to(_SCREAMING_SNAKE_CASE )
sag_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = '.'
__UpperCamelCase = torch.manual_seed(0 )
__UpperCamelCase = sag_pipe(
[prompt] , width=768 , height=512 , generator=_SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
__UpperCamelCase = output.images
assert image.shape == (1, 512, 768, 3)
| 383 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_snake_case = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
_snake_case = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_snake_case = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_snake_case = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
_snake_case = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_snake_case = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def _a ( __lowercase ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , __lowercase )
return [m.group(0 ) for m in matches]
def _a ( ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCamelCase = {
config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__UpperCamelCase = collections.defaultdict(__lowercase )
__UpperCamelCase = collections.defaultdict(__lowercase )
__UpperCamelCase = collections.defaultdict(__lowercase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(__lowercase ):
__UpperCamelCase = None
if _re_tf_models.match(__lowercase ) is not None:
__UpperCamelCase = tf_models
__UpperCamelCase = _re_tf_models.match(__lowercase ).groups()[0]
elif _re_flax_models.match(__lowercase ) is not None:
__UpperCamelCase = flax_models
__UpperCamelCase = _re_flax_models.match(__lowercase ).groups()[0]
elif _re_pt_models.match(__lowercase ) is not None:
__UpperCamelCase = pt_models
__UpperCamelCase = _re_pt_models.match(__lowercase ).groups()[0]
if lookup_dict is not None:
while len(__lowercase ) > 0:
if attr_name in model_prefix_to_model_type:
__UpperCamelCase = True
break
# Try again after removing the last word in the name
__UpperCamelCase = ''.join(camel_case_split(__lowercase )[:-1] )
__UpperCamelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__UpperCamelCase = list(__lowercase )
all_models.sort()
__UpperCamelCase = {'model_type': all_models}
__UpperCamelCase = [pt_models[t] for t in all_models]
__UpperCamelCase = [tf_models[t] for t in all_models]
__UpperCamelCase = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
__UpperCamelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__UpperCamelCase = 'AutoProcessor'
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__UpperCamelCase = 'AutoTokenizer'
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__UpperCamelCase = 'AutoFeatureExtractor'
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__UpperCamelCase = 'AutoTokenizer'
__UpperCamelCase = [processors[t] for t in all_models]
return pd.DataFrame(__lowercase )
def _a ( __lowercase ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__UpperCamelCase = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""]
__UpperCamelCase = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(__lowercase , __lowercase , __lowercase ):
# The type of pipeline may not exist in this framework
if not hasattr(__lowercase , __lowercase ):
continue
# First extract all model_names
__UpperCamelCase = []
for name in getattr(__lowercase , __lowercase ).values():
if isinstance(__lowercase , __lowercase ):
model_names.append(__lowercase )
else:
model_names.extend(list(__lowercase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def _a ( __lowercase , __lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = get_frameworks_table()
__UpperCamelCase = Dataset.from_pandas(__lowercase )
__UpperCamelCase = hf_hub_download(
'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=__lowercase )
__UpperCamelCase = Dataset.from_json(__lowercase )
__UpperCamelCase = {
tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
for i in range(len(__lowercase ) )
}
__UpperCamelCase = update_pipeline_and_auto_class_table(__lowercase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
__UpperCamelCase = sorted(table.keys() )
__UpperCamelCase = pd.DataFrame(
{
'model_class': model_classes,
'pipeline_tag': [table[m][0] for m in model_classes],
'auto_class': [table[m][1] for m in model_classes],
} )
__UpperCamelCase = Dataset.from_pandas(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(__lowercase , 'frameworks.json' ) )
tags_dataset.to_json(os.path.join(__lowercase , 'pipeline_tags.json' ) )
if commit_sha is not None:
__UpperCamelCase = (
F"""Update with commit {commit_sha}\n\nSee: """
F"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
__UpperCamelCase = 'Update'
upload_folder(
repo_id='huggingface/transformers-metadata' , folder_path=__lowercase , repo_type='dataset' , token=__lowercase , commit_message=__lowercase , )
def _a ( ) -> Dict:
"""simple docstring"""
__UpperCamelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__UpperCamelCase = transformers_module.pipelines.SUPPORTED_TASKS
__UpperCamelCase = []
for key in pipeline_tasks:
if key not in in_table:
__UpperCamelCase = pipeline_tasks[key]['pt']
if isinstance(__lowercase , (list, tuple) ):
__UpperCamelCase = model[0]
__UpperCamelCase = model.__name__
if model not in in_table.values():
missing.append(__lowercase )
if len(__lowercase ) > 0:
__UpperCamelCase = ', '.join(__lowercase )
raise ValueError(
'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
F"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
_snake_case = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 383 | 1 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class lowerCAmelCase_( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
super().__init__(*lowercase__ ,**lowercase__ )
if config is None:
assert isinstance(self.model ,lowercase__ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
lowerCAmelCase__ : str = self.model.config
else:
lowerCAmelCase__ : Union[str, Any] = config
lowerCAmelCase__ : Any = data_args
lowerCAmelCase__ : Optional[Any] = self.config.tgt_vocab_size if isinstance(self.config ,lowercase__ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
""" padding..""" )
if self.args.label_smoothing == 0:
lowerCAmelCase__ : Tuple = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
lowerCAmelCase__ : List[Any] = label_smoothed_nll_loss
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[int]:
if self.optimizer is None:
lowerCAmelCase__ : Optional[Any] = ["bias", "LayerNorm.weight"]
lowerCAmelCase__ : Dict = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
lowerCAmelCase__ : List[str] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
lowerCAmelCase__ : Optional[int] = Adafactor
lowerCAmelCase__ : List[Any] = {"scale_parameter": False, "relative_step": False}
else:
lowerCAmelCase__ : int = AdamW
lowerCAmelCase__ : Dict = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
lowerCAmelCase__ : Optional[Any] = self.args.learning_rate
if self.sharded_ddp:
lowerCAmelCase__ : List[str] = OSS(
params=lowercase__ ,optim=lowercase__ ,**lowercase__ ,)
else:
lowerCAmelCase__ : int = optimizer_cls(lowercase__ ,**lowercase__ )
if self.lr_scheduler is None:
lowerCAmelCase__ : List[str] = self._get_lr_scheduler(lowercase__ )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int:
lowerCAmelCase__ : Union[str, Any] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
lowerCAmelCase__ : Tuple = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
lowerCAmelCase__ : str = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
lowerCAmelCase__ : Dict = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=lowercase__ )
return scheduler
def UpperCAmelCase_ ( self ) -> int:
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Union[str, Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
lowerCAmelCase__ : List[str] = model(**lowercase__ ,use_cache=lowercase__ )[0]
lowerCAmelCase__ : Union[str, Any] = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute usual loss via models
lowerCAmelCase__ : Optional[Any] = model(**lowercase__ ,labels=lowercase__ ,use_cache=lowercase__ )[:2]
else:
# compute label smoothed loss
lowerCAmelCase__ : str = model(**lowercase__ ,use_cache=lowercase__ )[0]
lowerCAmelCase__ : List[str] = torch.nn.functional.log_softmax(lowercase__ ,dim=-1 )
lowerCAmelCase__ : Optional[int] = self.loss_fn(lowercase__ ,lowercase__ ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : Dict = inputs.pop("""labels""" )
lowerCAmelCase__ : Any = self._compute_loss(lowercase__ ,lowercase__ ,lowercase__ )
return loss
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = None ,) -> Any:
lowerCAmelCase__ : Tuple = self._prepare_inputs(lowercase__ )
lowerCAmelCase__ : int = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
lowerCAmelCase__ : str = self.model.generate(
inputs["""input_ids"""] ,attention_mask=inputs["""attention_mask"""] ,**lowercase__ ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
lowerCAmelCase__ : List[str] = self._pad_tensors_to_max_len(lowercase__ ,gen_kwargs["""max_length"""] )
lowerCAmelCase__ : Any = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
lowerCAmelCase__ : str = self._compute_loss(lowercase__ ,lowercase__ ,lowercase__ )
lowerCAmelCase__ : Union[str, Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
lowerCAmelCase__ : Any = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
lowerCAmelCase__ : Optional[int] = self._pad_tensors_to_max_len(lowercase__ ,gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : Tuple = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
F""" padded to `max_length`={max_length}""" )
lowerCAmelCase__ : Optional[int] = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
lowerCAmelCase__ : List[str] = tensor
return padded_tensor
| 702 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_lowerCAmelCase = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 160 | 0 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a ( _UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple = (DPMSolverSDEScheduler,)
UpperCamelCase_ : List[Any] = 10
def UpperCAmelCase_ ( self : Optional[Any] , **lowerCamelCase__ : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = {
'''num_train_timesteps''': 1_100,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**__a )
return config
def UpperCAmelCase_ ( self : str ) -> Any:
"""simple docstring"""
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__a )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def UpperCAmelCase_ ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = scheduler.scale_model_input(__a , __a )
__lowercase = model(__a , __a )
__lowercase = scheduler.step(__a , __a , __a )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(__a ) )
__lowercase = torch.mean(torch.abs(__a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1e-3
def UpperCAmelCase_ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(prediction_type='''v_prediction''' )
__lowercase = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = scheduler.scale_model_input(__a , __a )
__lowercase = model(__a , __a )
__lowercase = scheduler.step(__a , __a , __a )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(__a ) )
__lowercase = torch.mean(torch.abs(__a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1e-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1e-3
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__lowercase = scheduler.scale_model_input(__a , __a )
__lowercase = model(__a , __a )
__lowercase = scheduler.step(__a , __a , __a )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(__a ) )
__lowercase = torch.mean(torch.abs(__a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1e-3
def UpperCAmelCase_ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**__a , use_karras_sigmas=__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
__lowercase = sample.to(__a )
for t in scheduler.timesteps:
__lowercase = scheduler.scale_model_input(__a , __a )
__lowercase = model(__a , __a )
__lowercase = scheduler.step(__a , __a , __a )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(__a ) )
__lowercase = torch.mean(torch.abs(__a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
| 332 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """philschmid/bart-large-cnn-samsum"""
__SCREAMING_SNAKE_CASE = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
__SCREAMING_SNAKE_CASE = """summarizer"""
__SCREAMING_SNAKE_CASE = AutoTokenizer
__SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM
__SCREAMING_SNAKE_CASE = ["""text"""]
__SCREAMING_SNAKE_CASE = ["""text"""]
def UpperCamelCase__ (self , __a ) -> int:
"""simple docstring"""
return self.pre_processor(__a , return_tensors='pt' , truncation=__a )
def UpperCamelCase__ (self , __a ) -> Any:
"""simple docstring"""
return self.model.generate(**__a )[0]
def UpperCamelCase__ (self , __a ) -> Optional[Any]:
"""simple docstring"""
return self.pre_processor.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
| 146 | 0 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__magic_name__ : Optional[Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=1 ):
_snake_case = tokenizer
_snake_case = dataset
_snake_case = len(lowerCamelCase ) if n_tasks is None else n_tasks
_snake_case = n_copies
def __iter__( self ):
_snake_case = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
_snake_case = self.tokenizer(lowerCamelCase , padding=lowerCamelCase , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
_snake_case = start_length
_snake_case = eof_strings
_snake_case = tokenizer
def __call__( self , lowerCamelCase , lowerCamelCase , **lowerCamelCase ):
_snake_case = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
_snake_case = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase )
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = re.split("(%s)" % "|".join(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
# last string should be ""
return "".join(string_list[:-2] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=20 , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = defaultdict(SCREAMING_SNAKE_CASE__ ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(SCREAMING_SNAKE_CASE__ ) ):
with torch.no_grad():
_snake_case = batch["ids"].shape[-1]
_snake_case = accelerator.unwrap_model(SCREAMING_SNAKE_CASE__ ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# each task is generated batch_size times
_snake_case = batch["task_id"].repeat(SCREAMING_SNAKE_CASE__ )
_snake_case = accelerator.pad_across_processes(
SCREAMING_SNAKE_CASE__ , dim=1 , pad_index=tokenizer.pad_token_id )
_snake_case , _snake_case = accelerator.gather((generated_tokens, generated_tasks) )
_snake_case = generated_tokens.cpu().numpy()
_snake_case = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
gen_token_dict[task].append(SCREAMING_SNAKE_CASE__ )
_snake_case = [[] for _ in range(SCREAMING_SNAKE_CASE__ )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
code_gens[task].append(remove_last_block(SCREAMING_SNAKE_CASE__ ) )
return code_gens
def snake_case_ ( ):
'''simple docstring'''
_snake_case = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
_snake_case = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
_snake_case = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
_snake_case = "false"
if args.num_workers is None:
_snake_case = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
_snake_case = Accelerator()
set_seed(args.seed , device_specific=SCREAMING_SNAKE_CASE__ )
# Load model and tokenizer
_snake_case = AutoTokenizer.from_pretrained(args.model_ckpt )
_snake_case = tokenizer.eos_token
_snake_case = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_snake_case = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] ),
}
# Load evaluation dataset and metric
_snake_case = load_dataset("openai_humaneval" )
_snake_case = load_metric("code_eval" )
_snake_case = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
_snake_case = args.n_samples // args.batch_size
_snake_case = TokenizedDataset(SCREAMING_SNAKE_CASE__ , human_eval["test"] , n_copies=SCREAMING_SNAKE_CASE__ , n_tasks=SCREAMING_SNAKE_CASE__ )
# do not confuse args.batch_size, which is actually the num_return_sequences
_snake_case = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_snake_case = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
_snake_case , _snake_case = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case = complete_code(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , n_tasks=SCREAMING_SNAKE_CASE__ , batch_size=args.batch_size , **SCREAMING_SNAKE_CASE__ , )
if accelerator.is_main_process:
_snake_case = []
for task in tqdm(range(SCREAMING_SNAKE_CASE__ ) ):
_snake_case = human_eval["test"][task]["test"]
_snake_case = f'''check({human_eval["test"][task]["entry_point"]})'''
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
_snake_case , _snake_case = code_eval_metric.compute(
references=SCREAMING_SNAKE_CASE__ , predictions=SCREAMING_SNAKE_CASE__ , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 703 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ : int = logging.get_logger(__name__)
__magic_name__ : Tuple = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = '''xlm'''
UpperCAmelCase__ : Dict = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
def __init__( self , lowerCamelCase=30_145 , lowerCamelCase=2_048 , lowerCamelCase=12 , lowerCamelCase=16 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=1 , lowerCamelCase=True , lowerCamelCase=512 , lowerCamelCase=2_048**-0.5 , lowerCamelCase=1e-12 , lowerCamelCase=0.02 , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=5 , lowerCamelCase=True , lowerCamelCase="first" , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=0.1 , lowerCamelCase=5 , lowerCamelCase=5 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=0 , **lowerCamelCase , ):
_snake_case = vocab_size
_snake_case = emb_dim
_snake_case = n_layers
_snake_case = n_heads
_snake_case = dropout
_snake_case = attention_dropout
_snake_case = gelu_activation
_snake_case = sinusoidal_embeddings
_snake_case = causal
_snake_case = asm
_snake_case = n_langs
_snake_case = use_lang_emb
_snake_case = layer_norm_eps
_snake_case = bos_index
_snake_case = eos_index
_snake_case = pad_index
_snake_case = unk_index
_snake_case = mask_index
_snake_case = is_encoder
_snake_case = max_position_embeddings
_snake_case = embed_init_std
_snake_case = init_std
_snake_case = summary_type
_snake_case = summary_use_proj
_snake_case = summary_activation
_snake_case = summary_proj_to_labels
_snake_case = summary_first_dropout
_snake_case = start_n_top
_snake_case = end_n_top
_snake_case = mask_token_id
_snake_case = lang_id
if "n_words" in kwargs:
_snake_case = kwargs["n_words"]
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , **lowerCamelCase )
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
'''simple docstring'''
@property
def UpperCamelCase( self ):
if self.task == "multiple-choice":
_snake_case = {0: "batch", 1: "choice", 2: "sequence"}
else:
_snake_case = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 368 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase__ : str = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase__ : Tuple = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCamelCase__ : Any = {
'''allenai/led-base-16384''': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCAmelCase_ ( ):
__SCREAMING_SNAKE_CASE : List[str] = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
__SCREAMING_SNAKE_CASE : Optional[Any] = bs[:]
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowerCamelCase )
cs.append(2**8 + n )
n += 1
__SCREAMING_SNAKE_CASE : Dict = [chr(_lowerCamelCase ) for n in cs]
return dict(zip(_lowerCamelCase , _lowerCamelCase ) )
def lowerCAmelCase_ ( _lowerCamelCase: int ):
__SCREAMING_SNAKE_CASE : Optional[int] = set()
__SCREAMING_SNAKE_CASE : List[str] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = char
return pairs
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : Optional[Any] = VOCAB_FILES_NAMES
_A : Dict = PRETRAINED_VOCAB_FILES_MAP
_A : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any]="replace" , lowerCAmelCase__ : Union[str, Any]="<s>" , lowerCAmelCase__ : Tuple="</s>" , lowerCAmelCase__ : Tuple="</s>" , lowerCAmelCase__ : List[Any]="<s>" , lowerCAmelCase__ : Dict="<unk>" , lowerCAmelCase__ : List[Any]="<pad>" , lowerCAmelCase__ : Union[str, Any]="<mask>" , lowerCAmelCase__ : Any=False , **lowerCAmelCase__ : List[Any] , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
__SCREAMING_SNAKE_CASE : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
__SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
__SCREAMING_SNAKE_CASE : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
__SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
__SCREAMING_SNAKE_CASE : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__SCREAMING_SNAKE_CASE : str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__SCREAMING_SNAKE_CASE : Tuple = json.load(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = {v: k for k, v in self.encoder.items()}
__SCREAMING_SNAKE_CASE : Union[str, Any] = errors # how to handle errors in decoding
__SCREAMING_SNAKE_CASE : Union[str, Any] = bytes_to_unicode()
__SCREAMING_SNAKE_CASE : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as merges_handle:
__SCREAMING_SNAKE_CASE : Dict = merges_handle.read().split("""\n""" )[1:-1]
__SCREAMING_SNAKE_CASE : Dict = [tuple(merge.split() ) for merge in bpe_merges]
__SCREAMING_SNAKE_CASE : str = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE : Optional[int] = {}
__SCREAMING_SNAKE_CASE : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__SCREAMING_SNAKE_CASE : int = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
return len(self.encoder )
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase__ ( self : Union[str, Any] , lowerCAmelCase__ : int ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
__SCREAMING_SNAKE_CASE : Tuple = tuple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
__SCREAMING_SNAKE_CASE : List[Any] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = bigram
__SCREAMING_SNAKE_CASE : Optional[int] = []
__SCREAMING_SNAKE_CASE : int = 0
while i < len(lowerCAmelCase__ ):
try:
__SCREAMING_SNAKE_CASE : List[str] = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__SCREAMING_SNAKE_CASE : Tuple = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__SCREAMING_SNAKE_CASE : Dict = tuple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
__SCREAMING_SNAKE_CASE : List[str] = get_pairs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = """ """.join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = word
return word
def UpperCamelCase__ ( self : str , lowerCAmelCase__ : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(""" """ ) )
return bpe_tokens
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase__ )
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = """""".join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + """\n""" )
__SCREAMING_SNAKE_CASE : int = 0
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
__SCREAMING_SNAKE_CASE : int = token_index
writer.write(""" """.join(lowerCAmelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def UpperCamelCase__ ( self : str , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
__SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCamelCase__ ( self : Tuple , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple=False , **lowerCAmelCase__ : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
__SCREAMING_SNAKE_CASE : Dict = """ """ + text
return (text, kwargs)
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[bool] = None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = super()._pad(
encoded_inputs=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding_strategy=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
# Load from model defaults
if return_attention_mask is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__SCREAMING_SNAKE_CASE : List[Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__SCREAMING_SNAKE_CASE : Optional[int] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCAmelCase__ )
if needs_to_be_padded:
__SCREAMING_SNAKE_CASE : List[str] = len(lowerCAmelCase__ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__SCREAMING_SNAKE_CASE : List[str] = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
__SCREAMING_SNAKE_CASE : List[Any] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs | 578 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def lowerCAmelCase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: str , _lowerCamelCase: str , _lowerCamelCase: Path , _lowerCamelCase: str = None , _lowerCamelCase: str = None , _lowerCamelCase: str = None , ):
if config_name_or_path is None:
__SCREAMING_SNAKE_CASE : List[str] = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base"""
if generator_tokenizer_name_or_path is None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
__SCREAMING_SNAKE_CASE : Tuple = question_encoder_name_or_path
__SCREAMING_SNAKE_CASE : int = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration
# Save model.
__SCREAMING_SNAKE_CASE : List[Any] = RagConfig.from_pretrained(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = gen_config
__SCREAMING_SNAKE_CASE : Union[str, Any] = question_encoder_config
__SCREAMING_SNAKE_CASE : Dict = model_class.from_pretrained_question_encoder_generator(
_lowerCamelCase , _lowerCamelCase , config=_lowerCamelCase )
rag_model.save_pretrained(_lowerCamelCase )
# Sanity check.
model_class.from_pretrained(_lowerCamelCase )
# Save tokenizers.
__SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(_lowerCamelCase )
gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
UpperCamelCase__ : Dict = parser.parse_args()
UpperCamelCase__ : Any = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
) | 578 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
A__ : Optional[Any] = logging.get_logger(__name__)
A__ : Optional[Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class snake_case__ ( UpperCamelCase_ ):
'''simple docstring'''
A__ = '''deberta-v2'''
def __init__( self : Optional[Any] , __a : Any=128100 , __a : Optional[int]=1536 , __a : Dict=24 , __a : List[str]=24 , __a : Tuple=6144 , __a : List[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[str]=0.1 , __a : Optional[int]=512 , __a : Optional[Any]=0 , __a : Union[str, Any]=0.0_2 , __a : List[Any]=1e-7 , __a : List[str]=False , __a : str=-1 , __a : List[Any]=0 , __a : List[Any]=True , __a : Optional[Any]=None , __a : Tuple=0 , __a : str="gelu" , **__a : Optional[int] , ) -> int:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
__snake_case : List[Any] = hidden_size
__snake_case : int = num_hidden_layers
__snake_case : Union[str, Any] = num_attention_heads
__snake_case : List[Any] = intermediate_size
__snake_case : Any = hidden_act
__snake_case : Dict = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : str = max_position_embeddings
__snake_case : Dict = type_vocab_size
__snake_case : str = initializer_range
__snake_case : Optional[Any] = relative_attention
__snake_case : Optional[Any] = max_relative_positions
__snake_case : Optional[int] = pad_token_id
__snake_case : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCamelCase__ ) == str:
__snake_case : Optional[int] = [x.strip() for x in pos_att_type.lower().split('|' )]
__snake_case : Any = pos_att_type
__snake_case : int = vocab_size
__snake_case : Dict = layer_norm_eps
__snake_case : Tuple = kwargs.get('pooler_hidden_size' , UpperCamelCase__ )
__snake_case : Any = pooler_dropout
__snake_case : Union[str, Any] = pooler_hidden_act
class snake_case__ ( UpperCamelCase_ ):
'''simple docstring'''
@property
def A_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
if self.task == "multiple-choice":
__snake_case : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__snake_case : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def A_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
return 12
def A_ ( self : Dict , __a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a : int = -1 , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , __a : "PreTrainedTokenizerBase" = None , ) -> Tuple:
'''simple docstring'''
__snake_case : Optional[Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 712 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = GPTSanJapaneseTokenizer
A__ = False
A__ = {'''do_clean_text''': False, '''add_prefix_space''': False}
def A_ ( self : str ) -> Any:
'''simple docstring'''
super().setUp()
# fmt: off
__snake_case : int = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
__snake_case : Optional[int] = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
__snake_case : Any = {'unk_token': '<unk>'}
__snake_case : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(__a ) )
def A_ ( self : Optional[int] , **__a : Dict ) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : int , __a : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
__snake_case : int = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
__snake_case : Union[str, Any] = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def A_ ( self : Dict , __a : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
__snake_case , __snake_case : Any = self.get_input_output_texts(__a )
__snake_case : Union[str, Any] = tokenizer.encode(__a , add_special_tokens=__a )
__snake_case : int = tokenizer.decode(__a , clean_up_tokenization_spaces=__a )
return text, ids
def A_ ( self : Any ) -> int:
'''simple docstring'''
pass # TODO add if relevant
def A_ ( self : int ) -> List[Any]:
'''simple docstring'''
pass # TODO add if relevant
def A_ ( self : int ) -> str:
'''simple docstring'''
pass # TODO add if relevant
def A_ ( self : Any ) -> Dict:
'''simple docstring'''
__snake_case : List[str] = self.get_tokenizer()
# Testing tokenization
__snake_case : Union[str, Any] = 'こんにちは、世界。 こんばんは、㔺界。'
__snake_case : Optional[int] = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
__snake_case : str = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
__snake_case : Tuple = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__snake_case : List[str] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
__snake_case : Dict = tokens + [tokenizer.unk_token]
__snake_case : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__snake_case : Union[str, Any] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , __a )
def A_ ( self : List[str] ) -> Any:
'''simple docstring'''
__snake_case : int = self.get_tokenizer()
# Testing tokenization
__snake_case : Tuple = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
__snake_case : Dict = 'こんにちは、、、、世界。こんばんは、、、、世界。'
__snake_case : Tuple = tokenizer.encode(__a )
__snake_case : Tuple = tokenizer.decode(__a )
self.assertEqual(__a , __a )
@slow
def A_ ( self : Any ) -> str:
'''simple docstring'''
__snake_case : Optional[int] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
__snake_case : List[str] = 'こんにちは、世界。'
__snake_case : Any = 'こんばんは、㔺界。😀'
__snake_case : Union[str, Any] = 'こんにちは、世界。こんばんは、世界。😀'
__snake_case : Optional[int] = tokenizer.encode(prefix_text + input_text )
__snake_case : Optional[int] = tokenizer.encode('' , prefix_text=prefix_text + input_text )
__snake_case : List[str] = tokenizer.encode(__a , prefix_text=__a )
__snake_case : Any = tokenizer.decode(__a )
__snake_case : Tuple = tokenizer.decode(__a )
__snake_case : List[Any] = tokenizer.decode(__a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
@slow
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
__snake_case : List[str] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
__snake_case : Union[str, Any] = 'こんにちは、世界。'
__snake_case : Tuple = 'こんばんは、㔺界。😀'
__snake_case : str = len(tokenizer.encode(__a ) ) - 2
__snake_case : Optional[Any] = len(tokenizer.encode(__a ) ) - 2
__snake_case : Union[str, Any] = [1] + [0] * (len_prefix + len_text + 1)
__snake_case : int = [1] * (len_prefix + len_text + 1) + [0]
__snake_case : str = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__snake_case : List[str] = tokenizer(prefix_text + input_text ).token_type_ids
__snake_case : List[str] = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
__snake_case : Tuple = tokenizer(__a , prefix_text=__a ).token_type_ids
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
__snake_case : int = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
__snake_case : Optional[int] = tokenizer.encode('あンいワ' )
__snake_case : List[str] = tokenizer.encode('' , prefix_text='あンいワ' )
__snake_case : str = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(__a ) , tokenizer.decode(__a ) )
self.assertEqual(tokenizer.decode(__a ) , tokenizer.decode(__a ) )
self.assertNotEqual(__a , __a )
self.assertNotEqual(__a , __a )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def A_ ( self : Dict ) -> Dict:
'''simple docstring'''
__snake_case : List[Any] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
__snake_case : Any = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
__snake_case : Any = tokenizer(__a , padding=__a )
__snake_case : List[Any] = tokenizer.batch_encode_plus(__a , padding=__a )
# fmt: off
__snake_case : Optional[int] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
__snake_case : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__snake_case : Dict = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __a )
self.assertListEqual(x_token.token_type_ids , __a )
self.assertListEqual(x_token.attention_mask , __a )
self.assertListEqual(x_token_a.input_ids , __a )
self.assertListEqual(x_token_a.token_type_ids , __a )
self.assertListEqual(x_token_a.attention_mask , __a )
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def A_ ( self : Any ) -> Dict:
'''simple docstring'''
# tokenizer has no padding token
pass
| 124 | 0 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE: str = '''bart'''
SCREAMING_SNAKE_CASE: int = True
@st.cache(allow_output_mutation=lowerCAmelCase )
def _a ( )-> int:
if LOAD_DENSE_INDEX:
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
SCREAMING_SNAKE_CASE_ = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
SCREAMING_SNAKE_CASE_ = qar_model.eval()
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (None, None)
if MODEL_TYPE == "bart":
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
SCREAMING_SNAKE_CASE_ = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
SCREAMING_SNAKE_CASE_ = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
SCREAMING_SNAKE_CASE_ = sas_model.eval()
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=lowerCAmelCase )
def _a ( )-> Union[str, Any]:
if LOAD_DENSE_INDEX:
SCREAMING_SNAKE_CASE_ = faiss.StandardGpuResources()
SCREAMING_SNAKE_CASE_ = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
SCREAMING_SNAKE_CASE_ = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , )
SCREAMING_SNAKE_CASE_ = faiss.IndexFlatIP(128 )
SCREAMING_SNAKE_CASE_ = faiss.index_cpu_to_gpu(lowerCAmelCase , 1 , lowerCAmelCase )
wikiaab_gpu_index_flat.add(lowerCAmelCase ) # TODO fix for larger GPU
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (None, None)
SCREAMING_SNAKE_CASE_ = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=lowerCAmelCase )
def _a ( )-> Optional[Any]:
SCREAMING_SNAKE_CASE_ = datasets.load_dataset('eli5' , name='LFQA_reddit' )
SCREAMING_SNAKE_CASE_ = elia['train_eli5']
SCREAMING_SNAKE_CASE_ = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) )
SCREAMING_SNAKE_CASE_ = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(lowerCAmelCase )
return (elia_train, eli5_train_q_index)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE: Optional[Any] = load_indexes()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE: List[Any] = load_models()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE: List[str] = load_train_data()
def _a ( lowerCAmelCase , lowerCAmelCase=10 )-> Tuple:
SCREAMING_SNAKE_CASE_ = embed_questions_for_retrieval([question] , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = eli5_train_q_index.search(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = [elia_train[int(lowerCAmelCase )] for i in I[0]]
return nn_examples
def _a ( lowerCAmelCase , lowerCAmelCase="wiki40b" , lowerCAmelCase="dense" , lowerCAmelCase=10 )-> Union[str, Any]:
if source == "none":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = query_qa_dense_index(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = query_es_index(
lowerCAmelCase , lowerCAmelCase , index_name='english_wiki40b_snippets_100w' , n_results=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
SCREAMING_SNAKE_CASE_ = 'question: {} context: {}'.format(lowerCAmelCase , lowerCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda lowerCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda lowerCAmelCase : None),
} )
def _a ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=64 , lowerCAmelCase=256 , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=0.9_5 , lowerCAmelCase=0.8 )-> Tuple:
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = qa_sas_generate(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_answers=1 , num_beams=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase , do_sample=lowerCAmelCase , temp=lowerCAmelCase , top_p=lowerCAmelCase , top_k=lowerCAmelCase , max_input_length=1024 , device='cuda:0' , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
SCREAMING_SNAKE_CASE: List[Any] = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
SCREAMING_SNAKE_CASE: List[Any] = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE: Union[str, Any] = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE: Union[str, Any] = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
SCREAMING_SNAKE_CASE: str = st.sidebar.checkbox('''Demo options''')
if demo_options:
SCREAMING_SNAKE_CASE: Union[str, Any] = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
SCREAMING_SNAKE_CASE: Optional[Any] = action_list.index(action_st)
SCREAMING_SNAKE_CASE: Tuple = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
SCREAMING_SNAKE_CASE: Optional[int] = show_type == '''Show full text of passages'''
else:
SCREAMING_SNAKE_CASE: List[str] = 3
SCREAMING_SNAKE_CASE: Tuple = True
SCREAMING_SNAKE_CASE: Any = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
SCREAMING_SNAKE_CASE: int = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE: Dict = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
SCREAMING_SNAKE_CASE: List[Any] = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
SCREAMING_SNAKE_CASE: List[Any] = '''wiki40b'''
SCREAMING_SNAKE_CASE: Tuple = '''dense'''
SCREAMING_SNAKE_CASE: int = '''beam'''
SCREAMING_SNAKE_CASE: List[Any] = 2
SCREAMING_SNAKE_CASE: Optional[int] = 6_4
SCREAMING_SNAKE_CASE: Tuple = 2_5_6
SCREAMING_SNAKE_CASE: Optional[Any] = None
SCREAMING_SNAKE_CASE: int = None
SCREAMING_SNAKE_CASE: List[str] = st.sidebar.checkbox('''Generation options''')
if generate_options:
SCREAMING_SNAKE_CASE: Optional[Any] = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE: List[str] = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
SCREAMING_SNAKE_CASE: Optional[Any] = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE: Dict = st.sidebar.slider(
'''Maximum generation length''', min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE: Union[str, Any] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE: Union[str, Any] = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE: Tuple = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE: Optional[Any] = None
# start main text
SCREAMING_SNAKE_CASE: List[Any] = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
SCREAMING_SNAKE_CASE: List[Any] = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE: Dict = st.text_input('''Enter your question here:''', '''''')
else:
SCREAMING_SNAKE_CASE: int = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE: List[str] = make_support(question, source=wiki_source, method='''dense''', n_results=1_0)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE: List[Any] = make_support(question, source=wiki_source, method='''sparse''', n_results=1_0)
SCREAMING_SNAKE_CASE: Tuple = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE: List[Any] = support_list[:1_0]
SCREAMING_SNAKE_CASE: Union[str, Any] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE: Optional[int] = make_support(question, source=wiki_source, method=index_type, n_results=1_0)
if action in [0, 3]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE: Tuple = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE: Dict = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
SCREAMING_SNAKE_CASE: Dict = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE: List[Any] = '''[{}]({})'''.format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE: Optional[int] = sec_titles.split(''' & ''')
SCREAMING_SNAKE_CASE: str = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE: Dict = find_nearest_training(question)
SCREAMING_SNAKE_CASE: int = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
SCREAMING_SNAKE_CASE: List[Any] = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
SCREAMING_SNAKE_CASE: str = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True) | 360 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
SCREAMING_SNAKE_CASE: Optional[int] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def _a ( lowerCAmelCase )-> str:
for pegasus_name, hf_name in PATTERNS:
SCREAMING_SNAKE_CASE_ = k.replace(lowerCAmelCase , lowerCAmelCase )
return k
def _a ( lowerCAmelCase , lowerCAmelCase )-> PegasusForConditionalGeneration:
SCREAMING_SNAKE_CASE_ = DEFAULTS.copy()
cfg_kwargs.update(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = PegasusConfig(**lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = PegasusForConditionalGeneration(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch_model.model.state_dict()
SCREAMING_SNAKE_CASE_ = {}
for k, v in tf_weights.items():
SCREAMING_SNAKE_CASE_ = rename_state_dict_key(lowerCAmelCase )
if new_k not in sd:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
SCREAMING_SNAKE_CASE_ = v.T
SCREAMING_SNAKE_CASE_ = torch.tensor(lowerCAmelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
SCREAMING_SNAKE_CASE_ = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
SCREAMING_SNAKE_CASE_ = mapping['shared.weight']
SCREAMING_SNAKE_CASE_ = mapping['shared.weight']
SCREAMING_SNAKE_CASE_ = {k: torch.zeros_like(lowerCAmelCase ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch_model.model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def _a ( lowerCAmelCase="./ckpt/aeslc/model.ckpt-32000" )-> Dict:
SCREAMING_SNAKE_CASE_ = tf.train.list_variables(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = ['Adafactor', 'global_step']
for name, shape in tqdm(lowerCAmelCase , desc='converting tf checkpoint to dict' ):
SCREAMING_SNAKE_CASE_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE_ = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = array
return tf_weights
def _a ( lowerCAmelCase , lowerCAmelCase )-> Optional[Any]:
# save tokenizer first
SCREAMING_SNAKE_CASE_ = Path(lowerCAmelCase ).parent.name
SCREAMING_SNAKE_CASE_ = task_specific_params[F'''summarization_{dataset}''']['max_position_embeddings']
SCREAMING_SNAKE_CASE_ = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=lowerCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(lowerCAmelCase )
# convert model
SCREAMING_SNAKE_CASE_ = get_tf_weights_as_numpy(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
SCREAMING_SNAKE_CASE_ = task_specific_params
SCREAMING_SNAKE_CASE_ = convert_pegasus(lowerCAmelCase , lowerCAmelCase )
torch_model.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(lowerCAmelCase , Path(lowerCAmelCase ) / 'pytorch_model.bin' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE: Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
SCREAMING_SNAKE_CASE: int = parser.parse_args()
if args.save_dir is None:
SCREAMING_SNAKE_CASE: List[Any] = Path(args.tf_ckpt_path).parent.name
SCREAMING_SNAKE_CASE: Tuple = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir) | 360 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( ):
return [list(range(10_00 - i , -10_00 - i , -1 ) ) for i in range(10_00 )]
UpperCamelCase__ : List[Any] = generate_large_matrix()
UpperCamelCase__ : str = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def lowerCAmelCase_ ( _lowerCamelCase: list[list[int]] ):
assert all(row == sorted(_lowerCamelCase , reverse=_lowerCamelCase ) for row in grid )
assert all(list(_lowerCamelCase ) == sorted(_lowerCamelCase , reverse=_lowerCamelCase ) for col in zip(*_lowerCamelCase ) )
def lowerCAmelCase_ ( _lowerCamelCase: list[int] ):
__SCREAMING_SNAKE_CASE : str = 0
__SCREAMING_SNAKE_CASE : Optional[Any] = len(_lowerCamelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__SCREAMING_SNAKE_CASE : str = (left + right) // 2
__SCREAMING_SNAKE_CASE : int = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__SCREAMING_SNAKE_CASE : Optional[int] = mid + 1
else:
__SCREAMING_SNAKE_CASE : str = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(_lowerCamelCase )
def lowerCAmelCase_ ( _lowerCamelCase: list[list[int]] ):
__SCREAMING_SNAKE_CASE : List[Any] = 0
__SCREAMING_SNAKE_CASE : Dict = len(grid[0] )
for i in range(len(_lowerCamelCase ) ):
__SCREAMING_SNAKE_CASE : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(_lowerCamelCase ) * len(grid[0] )) - total
def lowerCAmelCase_ ( _lowerCamelCase: list[list[int]] ):
return len([number for row in grid for number in row if number < 0] )
def lowerCAmelCase_ ( _lowerCamelCase: list[list[int]] ):
__SCREAMING_SNAKE_CASE : List[str] = 0
for row in grid:
for i, number in enumerate(_lowerCamelCase ):
if number < 0:
total += len(_lowerCamelCase ) - i
break
return total
def lowerCAmelCase_ ( ):
from timeit import timeit
print("""Running benchmarks""" )
__SCREAMING_SNAKE_CASE : List[str] = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__SCREAMING_SNAKE_CASE : List[Any] = timeit(F"{func}(grid=grid)" , setup=_lowerCamelCase , number=5_00 )
print(F"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 178 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : Any = logging.get_logger(__name__)
UpperCamelCase__ : Dict = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : Optional[int] = '''beit'''
def __init__( self : Tuple , lowerCAmelCase__ : List[Any]=8_1_9_2 , lowerCAmelCase__ : List[Any]=7_6_8 , lowerCAmelCase__ : List[Any]=1_2 , lowerCAmelCase__ : Union[str, Any]=1_2 , lowerCAmelCase__ : Dict=3_0_7_2 , lowerCAmelCase__ : List[Any]="gelu" , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : int=0.0 , lowerCAmelCase__ : Tuple=0.02 , lowerCAmelCase__ : Any=1E-12 , lowerCAmelCase__ : Dict=2_2_4 , lowerCAmelCase__ : List[str]=1_6 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : Optional[int]=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : str=[3, 5, 7, 1_1] , lowerCAmelCase__ : str=[1, 2, 3, 6] , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Dict=0.4 , lowerCAmelCase__ : int=2_5_6 , lowerCAmelCase__ : Any=1 , lowerCAmelCase__ : str=False , lowerCAmelCase__ : Union[str, Any]=2_5_5 , **lowerCAmelCase__ : Optional[int] , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = vocab_size
__SCREAMING_SNAKE_CASE : int = hidden_size
__SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = initializer_range
__SCREAMING_SNAKE_CASE : Any = layer_norm_eps
__SCREAMING_SNAKE_CASE : Dict = image_size
__SCREAMING_SNAKE_CASE : Optional[int] = patch_size
__SCREAMING_SNAKE_CASE : int = num_channels
__SCREAMING_SNAKE_CASE : Tuple = use_mask_token
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_absolute_position_embeddings
__SCREAMING_SNAKE_CASE : Optional[int] = use_relative_position_bias
__SCREAMING_SNAKE_CASE : Optional[Any] = use_shared_relative_position_bias
__SCREAMING_SNAKE_CASE : Optional[int] = layer_scale_init_value
__SCREAMING_SNAKE_CASE : Any = drop_path_rate
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
__SCREAMING_SNAKE_CASE : Any = out_indices
__SCREAMING_SNAKE_CASE : List[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
__SCREAMING_SNAKE_CASE : List[Any] = use_auxiliary_head
__SCREAMING_SNAKE_CASE : str = auxiliary_loss_weight
__SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_channels
__SCREAMING_SNAKE_CASE : str = auxiliary_num_convs
__SCREAMING_SNAKE_CASE : int = auxiliary_concat_input
__SCREAMING_SNAKE_CASE : Dict = semantic_loss_ignore_index
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : List[str] = version.parse('''1.11''' )
@property
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
return 1E-4 | 178 | 1 |
'''simple docstring'''
def snake_case ( snake_case : str , snake_case : str ) -> Dict:
"""simple docstring"""
lowerCAmelCase = len(snake_case )
lowerCAmelCase = len(snake_case )
lowerCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
lowerCAmelCase = True
for i in range(snake_case ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
lowerCAmelCase = True
if a[i].islower():
lowerCAmelCase = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 284 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
_UpperCamelCase = pd.read_csv('sample_data.csv', header=None)
_UpperCamelCase = df.shape[:1][0]
# If you're using some other dataset input the target column
_UpperCamelCase = df.iloc[:, 1:2]
_UpperCamelCase = actual_data.values.reshape(len_data, 1)
_UpperCamelCase = MinMaxScaler().fit_transform(actual_data)
_UpperCamelCase = 10
_UpperCamelCase = 5
_UpperCamelCase = 20
_UpperCamelCase = len_data - periods * look_back
_UpperCamelCase = actual_data[:division]
_UpperCamelCase = actual_data[division - look_back :]
_UpperCamelCase , _UpperCamelCase = [], []
_UpperCamelCase , _UpperCamelCase = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
_UpperCamelCase = np.array(train_x)
_UpperCamelCase = np.array(test_x)
_UpperCamelCase = np.array([list(i.ravel()) for i in train_y])
_UpperCamelCase = np.array([list(i.ravel()) for i in test_y])
_UpperCamelCase = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss='mean_squared_error', optimizer='adam')
_UpperCamelCase = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
_UpperCamelCase = model.predict(x_test)
| 179 | 0 |
def __lowercase( __snake_case : int ,__snake_case : int ,__snake_case : int ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
__snake_case = _modexpt(__snake_case ,exponent // 2 ,__snake_case ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(__snake_case ,exponent - 1 ,__snake_case )) % modulo_value
def __lowercase( __snake_case : int = 17_77 ,__snake_case : int = 18_55 ,__snake_case : int = 8 ) -> int:
__snake_case = base
for _ in range(1 ,__snake_case ):
__snake_case = _modexpt(__snake_case ,__snake_case ,10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 714 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __lowercase( __snake_case : List[str] ,__snake_case : Optional[int] ,__snake_case : Dict ,__snake_case : List[str] ,__snake_case : Optional[int] ) -> int:
# Load configuration defined in the metadata file
with open(__snake_case ) as metadata_file:
__snake_case = json.load(__snake_case )
__snake_case = LukeConfig(use_entity_aware_attention=__snake_case ,**metadata['model_config'] )
# Load in the weights from the checkpoint_path
__snake_case = torch.load(__snake_case ,map_location='cpu' )['module']
# Load the entity vocab file
__snake_case = load_original_entity_vocab(__snake_case )
# add an entry for [MASK2]
__snake_case = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__snake_case = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
__snake_case = AddedToken('<ent>' ,lstrip=__snake_case ,rstrip=__snake_case )
__snake_case = AddedToken('<ent2>' ,lstrip=__snake_case ,rstrip=__snake_case )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__snake_case )
with open(os.path.join(__snake_case ,'tokenizer_config.json' ) ,'r' ) as f:
__snake_case = json.load(__snake_case )
__snake_case = 'MLukeTokenizer'
with open(os.path.join(__snake_case ,'tokenizer_config.json' ) ,'w' ) as f:
json.dump(__snake_case ,__snake_case )
with open(os.path.join(__snake_case ,MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) ,'w' ) as f:
json.dump(__snake_case ,__snake_case )
__snake_case = MLukeTokenizer.from_pretrained(__snake_case )
# Initialize the embeddings of the special tokens
__snake_case = tokenizer.convert_tokens_to_ids(['@'] )[0]
__snake_case = tokenizer.convert_tokens_to_ids(['#'] )[0]
__snake_case = state_dict['embeddings.word_embeddings.weight']
__snake_case = word_emb[ent_init_index].unsqueeze(0 )
__snake_case = word_emb[enta_init_index].unsqueeze(0 )
__snake_case = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__snake_case = state_dict[bias_name]
__snake_case = decoder_bias[ent_init_index].unsqueeze(0 )
__snake_case = decoder_bias[enta_init_index].unsqueeze(0 )
__snake_case = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__snake_case = f'''encoder.layer.{layer_index}.attention.self.'''
__snake_case = state_dict[prefix + matrix_name]
__snake_case = state_dict[prefix + matrix_name]
__snake_case = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__snake_case = state_dict['entity_embeddings.entity_embeddings.weight']
__snake_case = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
__snake_case = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__snake_case = state_dict['entity_predictions.bias']
__snake_case = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
__snake_case = torch.cat([entity_prediction_bias, entity_mask_bias] )
__snake_case = LukeForMaskedLM(config=__snake_case ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
__snake_case = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
__snake_case = state_dict[key]
else:
__snake_case = state_dict[key]
__snake_case , __snake_case = model.load_state_dict(__snake_case ,strict=__snake_case )
if set(__snake_case ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(__snake_case ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__snake_case = MLukeTokenizer.from_pretrained(__snake_case ,task='entity_classification' )
__snake_case = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
__snake_case = (0, 9)
__snake_case = tokenizer(__snake_case ,entity_spans=[span] ,return_tensors='pt' )
__snake_case = model(**__snake_case )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__snake_case = torch.Size((1, 33, 7_68) )
__snake_case = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__snake_case ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__snake_case = torch.Size((1, 1, 7_68) )
__snake_case = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__snake_case ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
__snake_case = MLukeTokenizer.from_pretrained(__snake_case )
__snake_case = 'Tokyo is the capital of <mask>.'
__snake_case = (24, 30)
__snake_case = tokenizer(__snake_case ,entity_spans=[span] ,return_tensors='pt' )
__snake_case = model(**__snake_case )
__snake_case = encoding['input_ids'][0].tolist()
__snake_case = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
__snake_case = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__snake_case )
__snake_case = outputs.entity_logits[0][0].argmax().item()
__snake_case = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(__snake_case ) )
model.save_pretrained(__snake_case )
def __lowercase( __snake_case : int ) -> Any:
__snake_case = ['[MASK]', '[PAD]', '[UNK]']
__snake_case = [json.loads(__snake_case ) for line in open(__snake_case )]
__snake_case = {}
for entry in data:
__snake_case = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__snake_case = entity_id
break
__snake_case = f'''{language}:{entity_name}'''
__snake_case = entity_id
return new_mapping
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCamelCase_ : Dict = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 345 | 0 |
import warnings
from functools import wraps
from typing import Callable
def snake_case__ ( lowercase ):
@wraps(lowercase )
def _inner_fn(*lowercase , **lowercase ):
warnings.warn(
(F'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , lowercase , )
return fn(*lowercase , **lowercase )
return _inner_fn | 613 | def snake_case__ ( lowercase , lowercase ):
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod() | 613 | 1 |
def A_ ( A__ = 1000 ) -> int:
a__ : List[Any] = -1
a__ : Union[str, Any] = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
a__ : Any = (n * n - 2 * a * n) // (2 * n - 2 * a)
a__ : Tuple = n - a - b
if c * c == (a * a + b * b):
a__ : Any = a * b * c
if candidate >= product:
a__ : Union[str, Any] = candidate
return product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 720 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def A_ ( A__ , A__ = True , A__ = math.inf , A__ = -math.inf , A__ = math.inf , A__ = -math.inf , A__ = False , A__ = 100 , A__ = 0.01 , A__ = 1 , ) -> Any:
a__ : List[str] = False
a__ : Optional[int] = search_prob
a__ : Any = start_temperate
a__ : Any = []
a__ : int = 0
a__ : Any = None
while not search_end:
a__ : Tuple = current_state.score()
if best_state is None or current_score > best_state.score():
a__ : Optional[Any] = current_state
scores.append(A__ )
iterations += 1
a__ : Union[str, Any] = None
a__ : List[str] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
a__ : Optional[int] = random.randint(0 , len(A__ ) - 1 ) # picking a random neighbor
a__ : List[str] = neighbors.pop(A__ )
a__ : List[Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
a__ : int = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
a__ : Optional[Any] = picked_neighbor
else:
a__ : List[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
a__ : Tuple = picked_neighbor
a__ : Union[str, Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
a__ : int = True
else:
a__ : Union[str, Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(A__ ) , A__ )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
def A_ ( A__ , A__ ) -> Optional[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowercase : List[Any] = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
lowercase : int = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowercase : List[str] = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
lowercase : List[Any] = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def A_ ( A__ , A__ ) -> Optional[Any]:
return (3 * x**2) - (6 * y)
lowercase : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowercase : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
lowercase : str = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowercase : List[Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 392 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = '''▁'''
SCREAMING_SNAKE_CASE_ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
SCREAMING_SNAKE_CASE_ = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1_024,
}
# fmt: off
SCREAMING_SNAKE_CASE_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = ['''input_ids''', '''attention_mask''']
_snake_case = []
_snake_case = []
def __init__( self , snake_case_ , snake_case_=None , snake_case_=None , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_ = None , **snake_case_ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
__lowerCAmelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
__lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
__lowerCAmelCase = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case_ , tgt_lang=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case_ ) )
__lowerCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__lowerCAmelCase = 1
__lowerCAmelCase = len(self.sp_model )
__lowerCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(snake_case_ )
}
__lowerCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
__lowerCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__lowerCAmelCase = src_lang if src_lang is not None else """en_XX"""
__lowerCAmelCase = self.lang_code_to_id[self._src_lang]
__lowerCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A__ ( self ) -> int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def A__ ( self ) -> str:
return self._src_lang
@src_lang.setter
def A__ ( self , snake_case_ ) -> None:
__lowerCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Dict:
__lowerCAmelCase = self.__dict__.copy()
__lowerCAmelCase = None
return state
def __setstate__( self , snake_case_ ) -> None:
__lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowerCAmelCase = {}
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A__ ( self ) -> Dict:
__lowerCAmelCase = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A__ ( self , snake_case_ ) -> List[str]:
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def A__ ( self , snake_case_ ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowerCAmelCase = self.sp_model.PieceToId(snake_case_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A__ ( self , snake_case_ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A__ ( self , snake_case_ ) -> Dict:
__lowerCAmelCase = []
__lowerCAmelCase = """"""
__lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
__lowerCAmelCase = True
__lowerCAmelCase = []
else:
current_sub_tokens.append(snake_case_ )
__lowerCAmelCase = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def A__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not os.path.isdir(snake_case_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCAmelCase = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , """wb""" ) as fi:
__lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
def A__ ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
__lowerCAmelCase = [1] * len(self.prefix_tokens )
__lowerCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def A__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__lowerCAmelCase = src_lang
__lowerCAmelCase = self(snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , **snake_case_ )
__lowerCAmelCase = self.convert_tokens_to_ids(snake_case_ )
__lowerCAmelCase = tgt_lang_id
return inputs
def A__ ( self , snake_case_ , snake_case_ = "en_XX" , snake_case_ = None , snake_case_ = "ro_RO" , **snake_case_ , ) -> BatchEncoding:
__lowerCAmelCase = src_lang
__lowerCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def A__ ( self ) -> int:
return self.set_src_lang_special_tokens(self.src_lang )
def A__ ( self ) -> int:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A__ ( self , snake_case_ ) -> None:
__lowerCAmelCase = self.lang_code_to_id[src_lang]
__lowerCAmelCase = [self.cur_lang_code_id]
__lowerCAmelCase = [self.eos_token_id]
def A__ ( self , snake_case_ ) -> None:
__lowerCAmelCase = self.lang_code_to_id[tgt_lang]
__lowerCAmelCase = [self.cur_lang_code_id]
__lowerCAmelCase = [self.eos_token_id]
| 465 |
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
SCREAMING_SNAKE_CASE_ = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def lowercase (_lowerCAmelCase ):
# Test all the extensions added in the setup
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
if args.check_lib:
SCREAMING_SNAKE_CASE_ = importlib.import_module('''transformers''')
SCREAMING_SNAKE_CASE_ = Path(transformers_module.__file__).parent
else:
SCREAMING_SNAKE_CASE_ = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
| 465 | 1 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __snake_case ( UpperCAmelCase_ : List[Any] ):
lowerCamelCase_ = filter(lambda UpperCAmelCase_ : p.requires_grad , model.parameters() )
lowerCamelCase_ = sum([np.prod(p.size() ) for p in model_parameters] )
return params
a_ : List[str] = logging.getLogger(__name__)
def __snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int ):
if metric == "rouge2":
lowerCamelCase_ = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
lowerCamelCase_ = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
lowerCamelCase_ = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
" function." )
lowerCamelCase_ = ModelCheckpoint(
dirpath=UpperCAmelCase_ , filename=UpperCAmelCase_ , monitor=F'''val_{metric}''' , mode="max" , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def __snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] ):
return EarlyStopping(
monitor=F'''val_{metric}''' , mode="min" if "loss" in metric else "max" , patience=UpperCAmelCase_ , verbose=UpperCAmelCase_ , )
class snake_case ( pl.Callback ):
"""simple docstring"""
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = {f'''lr_group_{i}''': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(UpperCamelCase )
@rank_zero_only
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=True ):
"""simple docstring"""
logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
lowerCamelCase_ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
lowerCamelCase_ = Path(pl_module.hparams.output_dir )
if type_path == "test":
lowerCamelCase_ = od / "test_results.txt"
lowerCamelCase_ = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
lowerCamelCase_ = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
lowerCamelCase_ = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=UpperCamelCase )
generations_file.parent.mkdir(exist_ok=UpperCamelCase )
with open(UpperCamelCase , "a+" ) as writer:
for key in sorted(UpperCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
lowerCamelCase_ = metrics[key]
if isinstance(UpperCamelCase , torch.Tensor ):
lowerCamelCase_ = val.item()
lowerCamelCase_ = f'''{key}: {val:.6f}\n'''
writer.write(UpperCamelCase )
if not save_generations:
return
if "preds" in metrics:
lowerCamelCase_ = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(UpperCamelCase )
@rank_zero_only
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
try:
lowerCamelCase_ = pl_module.model.model.num_parameters()
except AttributeError:
lowerCamelCase_ = pl_module.model.num_parameters()
lowerCamelCase_ = count_trainable_parameters(UpperCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(UpperCamelCase , UpperCamelCase , "test" )
@rank_zero_only
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 445 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=4 , ):
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_attention_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_choices
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_attention_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCamelCase , )
return config, input_ids, attention_mask
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class snake_case ( lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = FlaxDistilBertModelTester(self )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowerCamelCase_ = model_class_name.from_pretrained("distilbert-base-uncased" )
lowerCamelCase_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase )
@require_flax
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
lowerCamelCase_ = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCamelCase_ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase )[0]
lowerCamelCase_ = (1, 11, 768)
self.assertEqual(output.shape , UpperCamelCase )
lowerCamelCase_ = np.array([[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1e-4 ) )
| 445 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[str] = logging.get_logger(__name__)
A_ : Dict = {
"google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
"google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''mobilenet_v1'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=2_2_4 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE="relu6" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.999 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.001 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
snake_case__ : Optional[Any] = num_channels
snake_case__ : Tuple = image_size
snake_case__ : str = depth_multiplier
snake_case__ : Tuple = min_depth
snake_case__ : List[Any] = hidden_act
snake_case__ : int = tf_padding
snake_case__ : Tuple = classifier_dropout_prob
snake_case__ : Optional[Any] = initializer_range
snake_case__ : List[Any] = layer_norm_eps
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def __UpperCamelCase ( self ):
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def __UpperCamelCase ( self ):
return 1e-4
| 38 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = IFImgaImgSuperResolutionPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCamelCase ( self ):
return self._get_superresolution_dummy_components()
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : List[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __UpperCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __UpperCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __UpperCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __UpperCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __UpperCamelCase ( self ):
self._test_save_load_local()
def __UpperCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 38 | 1 |
def A ( _UpperCAmelCase : int ) -> list[int]:
'''simple docstring'''
if num <= 0:
raise ValueError('Input must be a positive integer' )
_UpperCAmelCase = [True] * (num + 1)
_UpperCAmelCase = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , _UpperCAmelCase ):
_UpperCAmelCase = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 639 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = IFInpaintingPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self : Any , A : int , A : Dict=0) -> Tuple:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 639 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
A_ : Optional[Any] = 'glpn'
def __init__(self : Optional[int] , a__ : Optional[Any]=3 , a__ : str=4 , a__ : str=[2, 2, 2, 2] , a__ : str=[8, 4, 2, 1] , a__ : Dict=[32, 64, 160, 256] , a__ : Tuple=[7, 3, 3, 3] , a__ : Optional[int]=[4, 2, 2, 2] , a__ : Optional[int]=[1, 2, 5, 8] , a__ : Tuple=[4, 4, 4, 4] , a__ : Union[str, Any]="gelu" , a__ : str=0.0 , a__ : List[Any]=0.0 , a__ : Optional[int]=0.0_2 , a__ : Union[str, Any]=0.1 , a__ : str=1E-6 , a__ : Dict=64 , a__ : List[str]=10 , a__ : Tuple=-1 , **a__ : List[Any] , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
__snake_case = num_channels
__snake_case = num_encoder_blocks
__snake_case = depths
__snake_case = sr_ratios
__snake_case = hidden_sizes
__snake_case = patch_sizes
__snake_case = strides
__snake_case = mlp_ratios
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = drop_path_rate
__snake_case = layer_norm_eps
__snake_case = decoder_hidden_size
__snake_case = max_depth
__snake_case = head_in_index
| 592 |
"""simple docstring"""
_snake_case = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 510 | 0 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class _a ( lowerCamelCase_ ):
"""simple docstring"""
def __lowerCAmelCase ( self , lowerCAmelCase_ ):
return 0.0
def __lowerCamelCase ( __a : np.ndarray , __a : int ) -> tuple[int | float, int | float]:
_lowercase =min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_lowercase =max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def __lowerCamelCase ( __a : FilterType , __a : int ) -> None:
_lowercase =512
_lowercase =[1] + [0] * (size - 1)
_lowercase =[filter_type.process(__a ) for item in inputs]
_lowercase =[0] * (samplerate - size) # zero-padding
outputs += filler
_lowercase =np.abs(np.fft.fft(__a ) )
_lowercase =20 * np.logaa(__a )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
# Display within reasonable bounds
_lowercase =get_bounds(__a , __a )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("Gain (dB)" )
plt.plot(__a )
plt.show()
def __lowerCamelCase ( __a : FilterType , __a : int ) -> None:
_lowercase =512
_lowercase =[1] + [0] * (size - 1)
_lowercase =[filter_type.process(__a ) for item in inputs]
_lowercase =[0] * (samplerate - size) # zero-padding
outputs += filler
_lowercase =np.angle(np.fft.fft(__a ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("Phase shift (Radians)" )
plt.plot(np.unwrap(__a , -2 * pi ) )
plt.show()
| 594 | import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def __lowerCamelCase ( ) -> Tuple:
_lowercase =torch.nn.Linear(2 , 4 )
_lowercase =torch.optim.AdamW(model.parameters() , lr=1.0 )
_lowercase =torch.optim.lr_scheduler.OneCycleLR(__a , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
_lowercase =DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
_lowercase =DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def __lowerCamelCase ( __a : Tuple ) -> Optional[int]:
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def __lowerCamelCase ( __a : str ) -> Tuple:
_lowercase =torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(__a )
class _a ( lowerCamelCase_ ):
"""simple docstring"""
@require_cuda
def __lowerCAmelCase ( self ):
_lowercase =Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(lowerCAmelCase_ ):
_lowercase =Accelerator(cpu=lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase =Accelerator()
_lowercase =GradientState()
assert state.num_steps == 1
_lowercase =4
assert state.num_steps == 4
assert state.sync_gradients is True
_lowercase =False
assert state.sync_gradients is False
GradientState._reset_state()
def __lowerCAmelCase ( self ):
_lowercase =Accelerator()
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =create_components()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) =accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def __lowerCAmelCase ( self ):
_lowercase =Accelerator()
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =create_components()
accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def __lowerCAmelCase ( self ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*lowerCAmelCase_ , **lowerCAmelCase_ ):
pass
with patch("torch.cuda.set_device" , lowerCAmelCase_ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
_lowercase =Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def __lowerCAmelCase ( self ):
_lowercase =Accelerator()
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =create_components()
accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_lowercase =get_signature(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCAmelCase_ )
# make sure random weights don't match
load_random_weights(lowerCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_ ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(lowerCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_ ) ) < 1e-3 )
def __lowerCAmelCase ( self ):
_lowercase =Accelerator()
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =create_components()
accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_lowercase =get_signature(lowerCAmelCase_ )
# saving hook
def save_config(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase ={"class_name": models[0].__class__.__name__}
with open(os.path.join(lowerCAmelCase_ , "data.json" ) , "w" ) as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
# loading hook
def load_config(lowerCAmelCase_ , lowerCAmelCase_ ):
with open(os.path.join(lowerCAmelCase_ , "data.json" ) , "r" ) as f:
_lowercase =json.load(lowerCAmelCase_ )
_lowercase =config["class_name"]
_lowercase =accelerator.register_save_state_pre_hook(lowerCAmelCase_ )
_lowercase =accelerator.register_load_state_pre_hook(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCAmelCase_ )
# make sure random weights don't match with hooks
load_random_weights(lowerCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_ ) ) > 1e-3 )
# random class name to verify correct one is loaded
_lowercase ="random"
# make sure loaded weights match with hooks
accelerator.load_state(lowerCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_ ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCAmelCase_ )
# make sure random weights don't match with hooks removed
load_random_weights(lowerCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_ ) ) > 1e-3 )
# random class name to verify correct one is loaded
_lowercase ="random"
# make sure loaded weights match with hooks removed
accelerator.load_state(lowerCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_ ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def __lowerCAmelCase ( self ):
_lowercase =Accelerator()
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =create_components()
_lowercase =None
# This should work
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase =accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
self.assertTrue(dummy_obj is None )
def __lowerCAmelCase ( self ):
_lowercase =Accelerator()
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =create_components()
_lowercase =[1, 2, 3]
# This should work
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase =accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(
getattr(lowerCAmelCase_ , "_is_accelerate_prepared" , lowerCAmelCase_ ) , lowerCAmelCase_ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(lowerCAmelCase_ , "_is_accelerate_prepared" , lowerCAmelCase_ ) , lowerCAmelCase_ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCAmelCase_ , "_is_accelerate_prepared" , lowerCAmelCase_ ) , lowerCAmelCase_ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCAmelCase_ , "_is_accelerate_prepared" , lowerCAmelCase_ ) , lowerCAmelCase_ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCAmelCase_ , "_is_accelerate_prepared" , lowerCAmelCase_ ) , lowerCAmelCase_ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCAmelCase_ , "_is_accelerate_prepared" , lowerCAmelCase_ ) , lowerCAmelCase_ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def __lowerCAmelCase ( self ):
from transformers import AutoModelForCausalLM
_lowercase =AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowerCAmelCase_ , device_map={"": 0} , )
_lowercase =Accelerator()
# This should work
_lowercase =accelerator.prepare(lowerCAmelCase_ )
@slow
@require_bnb
def __lowerCAmelCase ( self ):
from transformers import AutoModelForCausalLM
_lowercase =Accelerator()
with init_empty_weights():
_lowercase =AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
_lowercase =infer_auto_device_map(lowerCAmelCase_ )
_lowercase ="cpu"
_lowercase =AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , llm_inta_enable_fpaa_cpu_offload=lowerCAmelCase_ )
# This should not work and get value error
with self.assertRaises(lowerCAmelCase_ ):
_lowercase =accelerator.prepare(lowerCAmelCase_ )
@slow
@require_bnb
@require_multi_gpu
def __lowerCAmelCase ( self ):
from transformers import AutoModelForCausalLM
_lowercase ={"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
_lowercase =AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
_lowercase =infer_auto_device_map(lowerCAmelCase_ )
_lowercase =1
_lowercase =AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowerCAmelCase_ , device_map=lowerCAmelCase_ , )
_lowercase =Accelerator()
# This should not work and get value error
with self.assertRaises(lowerCAmelCase_ ):
_lowercase =accelerator.prepare(lowerCAmelCase_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __lowerCAmelCase ( self ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
_lowercase =AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
_lowercase =infer_auto_device_map(lowerCAmelCase_ )
_lowercase =1
_lowercase =AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowerCAmelCase_ , device_map=lowerCAmelCase_ , )
_lowercase =Accelerator()
# This should work
_lowercase =accelerator.prepare(lowerCAmelCase_ )
@require_cuda
def __lowerCAmelCase ( self ):
_lowercase =torch.nn.Linear(10 , 10 )
_lowercase =torch.optim.SGD(model.parameters() , lr=0.0_1 )
_lowercase =Accelerator(cpu=lowerCAmelCase_ )
_lowercase =accelerator.prepare(lowerCAmelCase_ )
| 594 | 1 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : int = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase : str , _lowerCamelCase : str ):
lowerCamelCase_ = RobertaPreLayerNormConfig.from_pretrained(
_lowerCamelCase , architectures=['''RobertaPreLayerNormForMaskedLM'''] )
# convert state_dict
lowerCamelCase_ = torch.load(hf_hub_download(repo_id=_lowerCamelCase , filename='''pytorch_model.bin''' ) )
lowerCamelCase_ = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith('''roberta.''' ):
lowerCamelCase_ = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ):
continue
lowerCamelCase_ = tensor_value
lowerCamelCase_ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=_lowerCamelCase , config=_lowerCamelCase , state_dict=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
# convert tokenizer
lowerCamelCase_ = AutoTokenizer.from_pretrained(_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__lowercase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowercase : Optional[Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 142 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : Tuple = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__lowercase : int = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
__lowercase : Dict = {"""facebook/blenderbot-3B""": 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase_ ( ):
lowerCamelCase_ = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowerCamelCase_ = bs[:]
lowerCamelCase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowerCamelCase )
cs.append(2**8 + n )
n += 1
lowerCamelCase_ = [chr(_lowerCamelCase ) for n in cs]
return dict(zip(_lowerCamelCase , _lowerCamelCase ) )
def lowerCamelCase_ ( _lowerCamelCase : Any ):
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
return pairs
class lowerCAmelCase ( a ):
"""simple docstring"""
__lowercase :Any = VOCAB_FILES_NAMES
__lowercase :List[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase :Dict = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="replace" , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<mask>" , UpperCamelCase__=False , **UpperCamelCase__ , ) -> Any:
'''simple docstring'''
lowerCamelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
lowerCamelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
lowerCamelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
lowerCamelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
lowerCamelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
lowerCamelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
with open(UpperCamelCase__ , encoding='''utf-8''' ) as vocab_handle:
lowerCamelCase_ = json.load(UpperCamelCase__ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase__ , encoding='''utf-8''' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('''\n''' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return len(self.encoder )
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(UpperCamelCase__ )
lowerCamelCase_ = get_pairs(UpperCamelCase__ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ , lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(UpperCamelCase__ ):
try:
lowerCamelCase_ = word.index(UpperCamelCase__ , UpperCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(UpperCamelCase__ )
lowerCamelCase_ = new_word
if len(UpperCamelCase__ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(UpperCamelCase__ )
lowerCamelCase_ = ''' '''.join(UpperCamelCase__ )
lowerCamelCase_ = word
return word
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = []
for token in re.findall(self.pat , UpperCamelCase__ ):
lowerCamelCase_ = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase__ ).split(''' ''' ) )
return bpe_tokens
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.decoder.get(UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = ''''''.join(UpperCamelCase__ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + '''\n''' )
lowerCamelCase_ = 0
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
lowerCamelCase_ = token_index
writer.write(''' '''.join(UpperCamelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__=False , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase__ ) > 0 and not text[0].isspace()):
lowerCamelCase_ = ''' ''' + text
return (text, kwargs)
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[str]:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCamelCase__ )
lowerCamelCase_ = ''' '''.join(UpperCamelCase__ )
lowerCamelCase_ = self.encode(UpperCamelCase__ )
if len(UpperCamelCase__ ) > self.model_max_length:
lowerCamelCase_ = input_ids[-self.model_max_length :]
logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids | 142 | 1 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
SCREAMING_SNAKE_CASE__ = 5
SCREAMING_SNAKE_CASE__ = 10
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( lowercase , unittest.TestCase ):
lowerCamelCase_ : Optional[int] = SpeechaTextTokenizer
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : Any = True
def _snake_case ( self : Any):
super().setUp()
SCREAMING_SNAKE_CASE_ :int = sp.SentencePieceProcessor()
spm_model.Load(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Any = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(UpperCAmelCase))]
SCREAMING_SNAKE_CASE_ :Union[str, Any] = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase))))
SCREAMING_SNAKE_CASE_ :List[Any] = Path(self.tmpdirname)
save_json(UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["vocab_file"])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["spm_file"])
SCREAMING_SNAKE_CASE_ :Union[str, Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def _snake_case ( self : List[str]):
SCREAMING_SNAKE_CASE_ :Tuple = "<pad>"
SCREAMING_SNAKE_CASE_ :Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase) , UpperCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase) , UpperCAmelCase)
def _snake_case ( self : Any):
SCREAMING_SNAKE_CASE_ :int = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<s>")
self.assertEqual(vocab_keys[1] , "<pad>")
self.assertEqual(vocab_keys[-1] , "j")
self.assertEqual(len(UpperCAmelCase) , 10_01)
def _snake_case ( self : List[str]):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
def _snake_case ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ :Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ :Optional[int] = tokenizer.tokenize("This is a test")
self.assertListEqual(UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase) , [2_89, 50, 14, 1_74, 3_86] , )
SCREAMING_SNAKE_CASE_ :Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
UpperCAmelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase)
self.assertListEqual(UpperCAmelCase , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
SCREAMING_SNAKE_CASE_ :Tuple = tokenizer.convert_ids_to_tokens(UpperCAmelCase)
self.assertListEqual(
UpperCAmelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def _snake_case ( self : List[str]):
# fmt: off
SCREAMING_SNAKE_CASE_ :Optional[int] = {"input_ids": [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class _UpperCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : Any = """valhalla/s2t_mustc_multilinguial_medium"""
lowerCamelCase_ : Dict = """C'est trop cool"""
lowerCamelCase_ : Any = """Esto es genial"""
@classmethod
def _snake_case ( cls : Dict):
SCREAMING_SNAKE_CASE_ :SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def _snake_case ( self : Optional[int]):
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11)
def _snake_case ( self : Optional[int]):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
def _snake_case ( self : Tuple):
self.assertIn(UpperCAmelCase , self.tokenizer.all_special_ids)
SCREAMING_SNAKE_CASE_ :Any = [ES_CODE, 4, 16_01, 47, 76_47, 2]
SCREAMING_SNAKE_CASE_ :List[Any] = self.tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase)
self.assertEqual(UpperCAmelCase , UpperCAmelCase)
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase)
def _snake_case ( self : Dict):
SCREAMING_SNAKE_CASE_ :Optional[Any] = "fr"
SCREAMING_SNAKE_CASE_ :List[Any] = self.tokenizer(self.french_text).input_ids
self.assertEqual(encoded[0] , UpperCAmelCase)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def _snake_case ( self : int):
SCREAMING_SNAKE_CASE_ :Dict = "fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
SCREAMING_SNAKE_CASE_ :Optional[Any] = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
| 140 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _UpperCAmelCase :
def __init__( self : Tuple , UpperCAmelCase : Collection[float] | None = None):
if components is None:
SCREAMING_SNAKE_CASE_ :List[str] = []
SCREAMING_SNAKE_CASE_ :Optional[int] = list(UpperCAmelCase)
def __len__( self : Optional[Any]):
return len(self.__components)
def __str__( self : List[Any]):
return "(" + ",".join(map(UpperCAmelCase , self.__components)) + ")"
def __add__( self : Optional[int] , UpperCAmelCase : Vector):
SCREAMING_SNAKE_CASE_ :List[str] = len(self)
if size == len(UpperCAmelCase):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = [self.__components[i] + other.component(UpperCAmelCase) for i in range(UpperCAmelCase)]
return Vector(UpperCAmelCase)
else:
raise Exception("must have the same size")
def __sub__( self : List[str] , UpperCAmelCase : Vector):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = len(self)
if size == len(UpperCAmelCase):
SCREAMING_SNAKE_CASE_ :Dict = [self.__components[i] - other.component(UpperCAmelCase) for i in range(UpperCAmelCase)]
return Vector(UpperCAmelCase)
else: # error case
raise Exception("must have the same size")
@overload
def __mul__( self : List[Any] , UpperCAmelCase : float):
...
@overload
def __mul__( self : int , UpperCAmelCase : Vector):
...
def __mul__( self : int , UpperCAmelCase : float | Vector):
if isinstance(UpperCAmelCase , (float, int)):
SCREAMING_SNAKE_CASE_ :Tuple = [c * other for c in self.__components]
return Vector(UpperCAmelCase)
elif isinstance(UpperCAmelCase , UpperCAmelCase) and len(self) == len(UpperCAmelCase):
SCREAMING_SNAKE_CASE_ :Optional[int] = len(self)
SCREAMING_SNAKE_CASE_ :str = [self.__components[i] * other.component(UpperCAmelCase) for i in range(UpperCAmelCase)]
return sum(UpperCAmelCase)
else: # error case
raise Exception("invalid operand!")
def _snake_case ( self : Any):
return Vector(self.__components)
def _snake_case ( self : str , UpperCAmelCase : int):
if isinstance(UpperCAmelCase , UpperCAmelCase) and -len(self.__components) <= i < len(self.__components):
return self.__components[i]
else:
raise Exception("index out of range")
def _snake_case ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : float):
assert -len(self.__components) <= pos < len(self.__components)
SCREAMING_SNAKE_CASE_ :List[str] = value
def _snake_case ( self : str):
if len(self.__components) == 0:
raise Exception("Vector is empty")
SCREAMING_SNAKE_CASE_ :Optional[int] = [c**2 for c in self.__components]
return math.sqrt(sum(UpperCAmelCase))
def _snake_case ( self : str , UpperCAmelCase : Vector , UpperCAmelCase : bool = False):
SCREAMING_SNAKE_CASE_ :Optional[Any] = self * other
SCREAMING_SNAKE_CASE_ :Dict = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den))
else:
return math.acos(num / den)
def lowercase ( a ):
'''simple docstring'''
assert isinstance(a , a )
return Vector([0] * dimension )
def lowercase ( a , a ):
'''simple docstring'''
assert isinstance(a , a ) and (isinstance(a , a ))
SCREAMING_SNAKE_CASE_ :str = [0] * dimension
SCREAMING_SNAKE_CASE_ :Union[str, Any] = 1
return Vector(a )
def lowercase ( a , a , a ):
'''simple docstring'''
assert (
isinstance(a , a )
and isinstance(a , a )
and (isinstance(a , (int, float) ))
)
return x * scalar + y
def lowercase ( a , a , a ):
'''simple docstring'''
random.seed(a )
SCREAMING_SNAKE_CASE_ :int = [random.randint(a , a ) for _ in range(a )]
return Vector(a )
class _UpperCAmelCase :
def __init__( self : Optional[int] , UpperCAmelCase : list[list[float]] , UpperCAmelCase : int , UpperCAmelCase : int):
SCREAMING_SNAKE_CASE_ :str = matrix
SCREAMING_SNAKE_CASE_ :List[Any] = w
SCREAMING_SNAKE_CASE_ :List[Any] = h
def __str__( self : List[str]):
SCREAMING_SNAKE_CASE_ :Optional[Any] = ""
for i in range(self.__height):
ans += "|"
for j in range(self.__width):
if j < self.__width - 1:
ans += str(self.__matrix[i][j]) + ","
else:
ans += str(self.__matrix[i][j]) + "|\n"
return ans
def __add__( self : Union[str, Any] , UpperCAmelCase : Matrix):
if self.__width == other.width() and self.__height == other.height():
SCREAMING_SNAKE_CASE_ :Any = []
for i in range(self.__height):
SCREAMING_SNAKE_CASE_ :str = [
self.__matrix[i][j] + other.component(UpperCAmelCase , UpperCAmelCase)
for j in range(self.__width)
]
matrix.append(UpperCAmelCase)
return Matrix(UpperCAmelCase , self.__width , self.__height)
else:
raise Exception("matrix must have the same dimension!")
def __sub__( self : Union[str, Any] , UpperCAmelCase : Matrix):
if self.__width == other.width() and self.__height == other.height():
SCREAMING_SNAKE_CASE_ :List[Any] = []
for i in range(self.__height):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = [
self.__matrix[i][j] - other.component(UpperCAmelCase , UpperCAmelCase)
for j in range(self.__width)
]
matrix.append(UpperCAmelCase)
return Matrix(UpperCAmelCase , self.__width , self.__height)
else:
raise Exception("matrices must have the same dimension!")
@overload
def __mul__( self : Tuple , UpperCAmelCase : float):
...
@overload
def __mul__( self : Optional[Any] , UpperCAmelCase : Vector):
...
def __mul__( self : List[str] , UpperCAmelCase : float | Vector):
if isinstance(UpperCAmelCase , UpperCAmelCase): # matrix-vector
if len(UpperCAmelCase) == self.__width:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = zero_vector(self.__height)
for i in range(self.__height):
SCREAMING_SNAKE_CASE_ :Optional[Any] = [
self.__matrix[i][j] * other.component(UpperCAmelCase)
for j in range(self.__width)
]
ans.change_component(UpperCAmelCase , sum(UpperCAmelCase))
return ans
else:
raise Exception(
"vector must have the same size as the "
"number of columns of the matrix!")
elif isinstance(UpperCAmelCase , (int, float)): # matrix-scalar
SCREAMING_SNAKE_CASE_ :Tuple = [
[self.__matrix[i][j] * other for j in range(self.__width)]
for i in range(self.__height)
]
return Matrix(UpperCAmelCase , self.__width , self.__height)
return None
def _snake_case ( self : Optional[int]):
return self.__height
def _snake_case ( self : Optional[int]):
return self.__width
def _snake_case ( self : str , UpperCAmelCase : int , UpperCAmelCase : int):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("change_component: indices out of bounds")
def _snake_case ( self : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : float):
if 0 <= x < self.__height and 0 <= y < self.__width:
SCREAMING_SNAKE_CASE_ :Dict = value
else:
raise Exception("change_component: indices out of bounds")
def _snake_case ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : int):
if self.__height != self.__width:
raise Exception("Matrix is not square")
SCREAMING_SNAKE_CASE_ :Dict = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(UpperCAmelCase)):
SCREAMING_SNAKE_CASE_ :Dict = minor[i][:y] + minor[i][y + 1 :]
return Matrix(UpperCAmelCase , self.__width - 1 , self.__height - 1).determinant()
def _snake_case ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : int):
if self.__height != self.__width:
raise Exception("Matrix is not square")
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(UpperCAmelCase , UpperCAmelCase)
else:
raise Exception("Indices out of bounds")
def _snake_case ( self : Union[str, Any]):
if self.__height != self.__width:
raise Exception("Matrix is not square")
if self.__height < 1:
raise Exception("Matrix has no element")
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
SCREAMING_SNAKE_CASE_ :str = [
self.__matrix[0][y] * self.cofactor(0 , UpperCAmelCase) for y in range(self.__width)
]
return sum(UpperCAmelCase)
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :list[list[float]] = [[0] * n for _ in range(a )]
return Matrix(a , a , a )
def lowercase ( a , a , a , a ):
'''simple docstring'''
random.seed(a )
SCREAMING_SNAKE_CASE_ :list[list[float]] = [
[random.randint(a , a ) for _ in range(a )] for _ in range(a )
]
return Matrix(a , a , a )
| 140 | 1 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = DDIMPipeline
a = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
a = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
a = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
a = False
def lowercase_ ( self : Any ) -> Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
SCREAMING_SNAKE_CASE__ = DDIMScheduler()
SCREAMING_SNAKE_CASE__ = {'''unet''': unet, '''scheduler''': scheduler}
return components
def lowercase_ ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int=0 ) -> Any:
if str(__lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = '''cpu'''
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe(**__lowerCamelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
SCREAMING_SNAKE_CASE__ = np.array(
[1.0_00e00, 5.7_17e-01, 4.7_17e-01, 1.0_00e00, 0.0_00e00, 1.0_00e00, 3.0_00e-04, 0.0_00e00, 9.0_00e-04] )
SCREAMING_SNAKE_CASE__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase , 1e-3 )
def lowercase_ ( self : List[Any] ) -> Tuple:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def lowercase_ ( self : Union[str, Any] ) -> Any:
super().test_save_load_local(expected_max_difference=3e-3 )
def lowercase_ ( self : Union[str, Any] ) -> Tuple:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def lowercase_ ( self : Optional[int] ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = '''google/ddpm-cifar10-32'''
SCREAMING_SNAKE_CASE__ = UNetaDModel.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = DDIMScheduler()
SCREAMING_SNAKE_CASE__ = DDIMPipeline(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
ddim.to(__lowerCamelCase )
ddim.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = ddim(generator=__lowerCamelCase , eta=0.0 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = '''google/ddpm-ema-bedroom-256'''
SCREAMING_SNAKE_CASE__ = UNetaDModel.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = DDIMScheduler.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = DDIMPipeline(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
ddpm.to(__lowerCamelCase )
ddpm.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = ddpm(generator=__lowerCamelCase , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 493 |
import numpy as np
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ) -> None:
SCREAMING_SNAKE_CASE__ = np.array(__lowerCamelCase )
def lowercase_ ( self : List[str] , __lowerCamelCase : str ) -> np.ndarray:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = np.where(letter == self.SQUARE )
SCREAMING_SNAKE_CASE__ = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def lowercase_ ( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.SQUARE[indexa - 1, indexa - 1]
return letter
def lowercase_ ( self : List[Any] , __lowerCamelCase : str ) -> str:
SCREAMING_SNAKE_CASE__ = message.lower()
SCREAMING_SNAKE_CASE__ = message.replace(''' ''' , '''''' )
SCREAMING_SNAKE_CASE__ = message.replace('''j''' , '''i''' )
SCREAMING_SNAKE_CASE__ = np.empty((2, len(__lowerCamelCase )) )
for letter_index in range(len(__lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ = self.letter_to_numbers(message[letter_index] )
SCREAMING_SNAKE_CASE__ = numbers[0]
SCREAMING_SNAKE_CASE__ = numbers[1]
SCREAMING_SNAKE_CASE__ = first_step.reshape(2 * len(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = ''''''
for numbers_index in range(len(__lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ = int(second_step[numbers_index * 2] )
SCREAMING_SNAKE_CASE__ = int(second_step[(numbers_index * 2) + 1] )
SCREAMING_SNAKE_CASE__ = self.numbers_to_letter(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = encoded_message + letter
return encoded_message
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : str ) -> str:
SCREAMING_SNAKE_CASE__ = message.lower()
message.replace(''' ''' , '''''' )
SCREAMING_SNAKE_CASE__ = np.empty(2 * len(__lowerCamelCase ) )
for letter_index in range(len(__lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ = self.letter_to_numbers(message[letter_index] )
SCREAMING_SNAKE_CASE__ = numbers[0]
SCREAMING_SNAKE_CASE__ = numbers[1]
SCREAMING_SNAKE_CASE__ = first_step.reshape((2, len(__lowerCamelCase )) )
SCREAMING_SNAKE_CASE__ = ''''''
for numbers_index in range(len(__lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ = int(second_step[0, numbers_index] )
SCREAMING_SNAKE_CASE__ = int(second_step[1, numbers_index] )
SCREAMING_SNAKE_CASE__ = self.numbers_to_letter(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = decoded_message + letter
return decoded_message
| 493 | 1 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 713 |
from __future__ import annotations
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
if resistor <= 0:
lowercase__ = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(lowerCamelCase_ )
first_sum += 1 / float(lowerCamelCase_ )
index += 1
return 1 / first_sum
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase__ = F"""Resistor at index {index} has a negative value!"""
raise ValueError(lowerCamelCase_ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Tuple = (DDPMScheduler,)
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_lowercase )
return config
def _lowercase ( self ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=_lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , )
def _lowercase ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowercase )
_lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_lowercase ):
if i == len(_lowercase ) - 1:
_lowerCAmelCase = -1
else:
_lowerCAmelCase = timesteps[i + 1]
_lowerCAmelCase = scheduler.previous_timestep(_lowercase )
_lowerCAmelCase = prev_t.item()
self.assertEqual(_lowercase , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_lowercase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
_lowerCAmelCase = len(_lowercase )
with self.assertRaises(_lowercase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_lowercase , timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_lowercase , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=_lowercase )
| 5 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_lowercase = logging.get_logger(__name__)
@add_end_docstrings(_SCREAMING_SNAKE_CASE )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , **_lowercase ):
"""simple docstring"""
super().__init__(**_lowercase )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
# No specific FOR_XXX available yet
def __call__( self , _lowercase , **_lowercase ):
"""simple docstring"""
return super().__call__(_lowercase , **_lowercase )
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = {}
if "candidate_labels" in kwargs:
_lowerCAmelCase = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
_lowerCAmelCase = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def _lowercase ( self , _lowercase , _lowercase=None , _lowercase="This is a sound of {}." ):
"""simple docstring"""
if isinstance(_lowercase , _lowercase ):
if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
_lowerCAmelCase = requests.get(_lowercase ).content
else:
with open(_lowercase , """rb""" ) as f:
_lowerCAmelCase = f.read()
if isinstance(_lowercase , _lowercase ):
_lowerCAmelCase = ffmpeg_read(_lowercase , self.feature_extractor.sampling_rate )
if not isinstance(_lowercase , np.ndarray ):
raise ValueError("""We expect a numpy ndarray as input""" )
if len(audio.shape ) != 1:
raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
_lowerCAmelCase = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
_lowerCAmelCase = candidate_labels
_lowerCAmelCase = [hypothesis_template.format(_lowercase ) for x in candidate_labels]
_lowerCAmelCase = self.tokenizer(_lowercase , return_tensors=self.framework , padding=_lowercase )
_lowerCAmelCase = [text_inputs]
return inputs
def _lowercase ( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = model_inputs.pop("""candidate_labels""" )
_lowerCAmelCase = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , _lowercase ):
_lowerCAmelCase = text_inputs[0]
else:
# Batching case.
_lowerCAmelCase = text_inputs[0][0]
_lowerCAmelCase = self.model(**_lowercase , **_lowercase )
_lowerCAmelCase = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def _lowercase ( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = model_outputs.pop("""candidate_labels""" )
_lowerCAmelCase = model_outputs["""logits"""][0]
if self.framework == "pt":
_lowerCAmelCase = logits.softmax(dim=0 )
_lowerCAmelCase = probs.tolist()
else:
raise ValueError("""`tf` framework not supported.""" )
_lowerCAmelCase = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(_lowercase , _lowercase ) , key=lambda _lowercase : -x[0] )
]
return result
| 5 | 1 |
'''simple docstring'''
def __UpperCamelCase( _A : int ):
'''simple docstring'''
if length <= 0 or not isinstance(_A , _A ):
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(_A )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 496 | '''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def __UpperCamelCase( _A : str ):
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def __UpperCamelCase( _A : int , _A : str ):
'''simple docstring'''
return (-y * np.log(_A ) - (1 - y) * np.log(1 - h )).mean()
def __UpperCamelCase( _A : Optional[Any] , _A : Tuple , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = np.dot(_A , _A )
return np.sum(y * scores - np.log(1 + np.exp(_A ) ) )
def __UpperCamelCase( _A : Dict , _A : Optional[int] , _A : List[Any] , _A : str=7_00_00 ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = np.zeros(x.shape[1] )
for iterations in range(_A ):
UpperCAmelCase__ : Tuple = np.dot(_A , _A )
UpperCAmelCase__ : Optional[Any] = sigmoid_function(_A )
UpperCAmelCase__ : Optional[int] = np.dot(x.T , h - y ) / y.size
UpperCAmelCase__ : List[Any] = theta - alpha * gradient # updating the weights
UpperCAmelCase__ : Tuple = np.dot(_A , _A )
UpperCAmelCase__ : Tuple = sigmoid_function(_A )
UpperCAmelCase__ : Optional[int] = cost_function(_A , _A )
if iterations % 1_00 == 0:
print(F'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
UpperCamelCase__ : str = datasets.load_iris()
UpperCamelCase__ : Tuple = iris.data[:, :2]
UpperCamelCase__ : str = (iris.target != 0) * 1
UpperCamelCase__ : Any = 0.1
UpperCamelCase__ : List[str] = logistic_reg(alpha, x, y, max_iterations=70_000)
print('theta: ', theta) # printing the theta i.e our weights vector
def __UpperCamelCase( _A : Any ):
'''simple docstring'''
return sigmoid_function(
np.dot(_A , _A ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((UpperCamelCase__) , (UpperCamelCase__)) : Any = (x[:, 0].min(), x[:, 0].max())
((UpperCamelCase__) , (UpperCamelCase__)) : Optional[Any] = (x[:, 1].min(), x[:, 1].max())
((UpperCamelCase__) , (UpperCamelCase__)) : Any = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
UpperCamelCase__ : str = np.c_[xxa.ravel(), xxa.ravel()]
UpperCamelCase__ : Union[str, Any] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
| 496 | 1 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
__lowerCAmelCase = logging.getLogger()
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_snake_case = "\n".join(__snake_case )
Path(__snake_case ).open("""w""" ).writelines(__snake_case )
__lowerCAmelCase = "patrickvonplaten/t5-tiny-random"
__lowerCAmelCase = "sshleifer/bart-tiny-random"
__lowerCAmelCase = "sshleifer/tiny-mbart"
__lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _lowerCAmelCase ( _A ):
'''simple docstring'''
def lowercase (self , UpperCAmelCase ) -> Optional[Any]:
_snake_case = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
_snake_case = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
_snake_case = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
_dump_articles(UpperCAmelCase , UpperCAmelCase )
_snake_case = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
_snake_case = "translation_en_to_de" if model == T5_TINY else "summarization"
_snake_case = f"""\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n """.split()
with patch.object(UpperCAmelCase , """argv""" , UpperCAmelCase ):
run_generate()
assert Path(UpperCAmelCase ).exists()
# os.remove(Path(output_file_name))
def lowercase (self ) -> Any:
self.run_eval_tester(UpperCAmelCase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowercase (self , UpperCAmelCase ) -> List[Any]:
self.run_eval_tester(UpperCAmelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowercase (self , UpperCAmelCase ) -> Optional[int]:
_snake_case = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
_snake_case = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
_snake_case = {
"en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
"de": [
"Maschinelles Lernen ist großartig, oder?",
"Ich esse gerne Bananen",
"Morgen ist wieder ein toller Tag!",
],
}
_snake_case = Path(self.get_auto_remove_tmp_dir() )
_snake_case = str(tmp_dir / """scores.json""" )
_snake_case = str(tmp_dir / """val.target""" )
_dump_articles(UpperCAmelCase , text["""en"""] )
_dump_articles(UpperCAmelCase , text["""de"""] )
_snake_case = "translation_en_to_de" if model == T5_TINY else "summarization"
_snake_case = f"""\n run_eval_search.py\n {model}\n {str(UpperCAmelCase )}\n {str(UpperCAmelCase )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n """.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(UpperCAmelCase , """argv""" , UpperCAmelCase ):
with CaptureStdout() as cs:
run_search()
_snake_case = [" num_beams | length_penalty", model, "Best score args"]
_snake_case = ["Info"]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(UpperCAmelCase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(UpperCAmelCase ).exists()
os.remove(Path(UpperCAmelCase ) ) | 585 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class _snake_case :
_A = 42
_A = None
# Automatically constructed
_A = "dict"
_A = None
_A = field(default='Translation' , init=_A , repr=_A )
def __call__( self ) -> Optional[int]:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowerCAmelCase_ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value("string" ) for k in sorted(self.languages )}
@dataclass
class _snake_case :
_A = None
_A = None
_A = None
# Automatically constructed
_A = "dict"
_A = None
_A = field(default='TranslationVariableLanguages' , init=_A , repr=_A )
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ :int = sorted(set(self.languages ) ) if self.languages else None
snake_case__ :Union[str, Any] = len(self.languages ) if self.languages else None
def __call__( self ) -> List[str]:
return pa.struct({"language": pa.list_(pa.string() ), "translation": pa.list_(pa.string() )} )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[int]:
snake_case__ :Tuple = set(self.languages )
if self.languages and set(UpperCamelCase ) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(UpperCamelCase ) - lang_set ) )}) are not in valid set ({", ".join(UpperCamelCase )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
snake_case__ :Optional[int] = []
for lang, text in translation_dict.items():
if isinstance(UpperCamelCase ,UpperCamelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
snake_case__ , snake_case__ :Dict = zip(*sorted(UpperCamelCase ) )
return {"language": languages, "translation": translations}
def lowerCAmelCase_ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value("string" ) ),
"translation": Sequence(Value("string" ) ),
} | 241 | 0 |
from __future__ import annotations
from collections import deque
class lowercase :
def __init__( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = []
self.adlist.append(
{'value': '', 'next_states': [], 'fail_state': 0, 'output': []} )
for keyword in keywords:
self.add_keyword(A_ )
self.set_fail_transitions()
def __UpperCamelCase ( self , A_ , A_ ) -> int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def __UpperCamelCase ( self , A_ ) -> None:
"""simple docstring"""
UpperCamelCase = 0
for character in keyword:
UpperCamelCase = self.find_next_state(A_ , A_ )
if next_state is None:
self.adlist.append(
{
'value': character,
'next_states': [],
'fail_state': 0,
'output': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
UpperCamelCase = len(self.adlist ) - 1
else:
UpperCamelCase = next_state
self.adlist[current_state]["output"].append(A_ )
def __UpperCamelCase ( self ) -> None:
"""simple docstring"""
UpperCamelCase = deque()
for node in self.adlist[0]["next_states"]:
q.append(A_ )
UpperCamelCase = 0
while q:
UpperCamelCase = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(A_ )
UpperCamelCase = self.adlist[r]['fail_state']
while (
self.find_next_state(A_ , self.adlist[child]['value'] ) is None
and state != 0
):
UpperCamelCase = self.adlist[state]['fail_state']
UpperCamelCase = self.find_next_state(
A_ , self.adlist[child]['value'] )
if self.adlist[child]["fail_state"] is None:
UpperCamelCase = 0
UpperCamelCase = (
self.adlist[child]['output']
+ self.adlist[self.adlist[child]['fail_state']]['output']
)
def __UpperCamelCase ( self , A_ ) -> dict[str, list[int]]:
"""simple docstring"""
UpperCamelCase = {} # returns a dict with keywords and list of its occurrences
UpperCamelCase = 0
for i in range(len(A_ ) ):
while (
self.find_next_state(A_ , string[i] ) is None
and current_state != 0
):
UpperCamelCase = self.adlist[current_state]['fail_state']
UpperCamelCase = self.find_next_state(A_ , string[i] )
if next_state is None:
UpperCamelCase = 0
else:
UpperCamelCase = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
UpperCamelCase = []
result[key].append(i - len(A_ ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_UpperCAmelCase : Any = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
'''simple docstring'''
for attribute in key.split('.' ):
UpperCamelCase = getattr(lowercase , lowercase )
if weight_type is not None:
UpperCamelCase = getattr(lowercase , lowercase ).shape
else:
UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCamelCase = value
elif weight_type == "weight_g":
UpperCamelCase = value
elif weight_type == "weight_v":
UpperCamelCase = value
elif weight_type == "bias":
UpperCamelCase = value
elif weight_type == "running_mean":
UpperCamelCase = value
elif weight_type == "running_var":
UpperCamelCase = value
elif weight_type == "num_batches_tracked":
UpperCamelCase = value
elif weight_type == "inv_freq":
UpperCamelCase = value
else:
UpperCamelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def A ( lowercase , lowercase , lowercase ) -> Any:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase = fairseq_model.state_dict()
UpperCamelCase = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCamelCase = True
if "*" in mapped_key:
UpperCamelCase = name.split(lowercase )[0].split('.' )[-2]
UpperCamelCase = mapped_key.replace('*' , lowercase )
if "pos_bias_u" in name:
UpperCamelCase = None
elif "pos_bias_v" in name:
UpperCamelCase = None
elif "weight_g" in name:
UpperCamelCase = 'weight_g'
elif "weight_v" in name:
UpperCamelCase = 'weight_v'
elif "bias" in name:
UpperCamelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase = 'weight'
elif "running_mean" in name:
UpperCamelCase = 'running_mean'
elif "inv_freq" in name:
UpperCamelCase = 'inv_freq'
elif "running_var" in name:
UpperCamelCase = 'running_var'
elif "num_batches_tracked" in name:
UpperCamelCase = 'num_batches_tracked'
else:
UpperCamelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = full_name.split('conv_layers.' )[-1]
UpperCamelCase = name.split('.' )
UpperCamelCase = int(items[0] )
UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def A ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> int:
'''simple docstring'''
if config_path is not None:
UpperCamelCase = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act='swish' )
else:
UpperCamelCase = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
UpperCamelCase = 'rotary'
if is_finetuned:
if dict_path:
UpperCamelCase = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase = target_dict.pad_index
UpperCamelCase = target_dict.bos_index
UpperCamelCase = target_dict.eos_index
UpperCamelCase = len(target_dict.symbols )
UpperCamelCase = os.path.join(lowercase , 'vocab.json' )
if not os.path.isdir(lowercase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase = 0
UpperCamelCase = 1
with open(lowercase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowercase , lowercase )
UpperCamelCase = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowercase , )
UpperCamelCase = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
UpperCamelCase = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
UpperCamelCase = WavaVecaConformerForCTC(lowercase )
else:
UpperCamelCase = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
UpperCamelCase = argparse.Namespace(task='audio_pretraining' )
UpperCamelCase = fairseq.tasks.setup_task(lowercase )
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
UpperCamelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase : Dict = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 3 | 1 |
import math
import qiskit
def __lowerCAmelCase ( UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 1 ) -> qiskit.result.counts.Counts:
if (
isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
or isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
or isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
):
raise TypeError("""inputs must be integers.""" )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("""inputs must be positive.""" )
if (
(math.floor(UpperCAmelCase__ ) != input_a)
or (math.floor(UpperCAmelCase__ ) != input_a)
or (math.floor(UpperCAmelCase__ ) != carry_in)
):
raise ValueError("""inputs must be exact integers.""" )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("""inputs must be less or equal to 2.""" )
# build registers
lowerCamelCase_ = qiskit.QuantumRegister(4 , """qr""" )
lowerCamelCase_ = qiskit.ClassicalRegister(2 , """cr""" )
# list the entries
lowerCamelCase_ = [input_a, input_a, carry_in]
lowerCamelCase_ = qiskit.QuantumCircuit(UpperCAmelCase__ , UpperCAmelCase__ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(UpperCAmelCase__ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(UpperCAmelCase__ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(UpperCAmelCase__ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , UpperCAmelCase__ ) # measure the last two qbits
lowerCamelCase_ = qiskit.Aer.get_backend("""aer_simulator""" )
lowerCamelCase_ = qiskit.execute(UpperCAmelCase__ , UpperCAmelCase__ , shots=1_0_0_0 )
return job.result().get_counts(UpperCAmelCase__ )
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 272 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __A( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE = StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self : List[Any] ):
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
lowerCamelCase_ = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
lowerCamelCase_ = CLIPTextModel(__UpperCamelCase )
lowerCamelCase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any=0 ):
lowerCamelCase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert("""RGB""" )
if str(__UpperCamelCase ).startswith("""mps""" ):
lowerCamelCase_ = torch.manual_seed(__UpperCamelCase )
else:
lowerCamelCase_ = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
lowerCamelCase_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self : Dict ):
lowerCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = sd_pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Dict ):
lowerCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = """french fries"""
lowerCamelCase_ = sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = [inputs["""prompt"""]] * 2
lowerCamelCase_ = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
lowerCamelCase_ = torch.from_numpy(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
lowerCamelCase_ = image / 2 + 0.5
lowerCamelCase_ = image.permute(0 , 3 , 1 , 2 )
lowerCamelCase_ = image.repeat(2 , 1 , 1 , 1 )
lowerCamelCase_ = sd_pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
lowerCamelCase_ = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Dict ):
lowerCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = sd_pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = [round(__UpperCamelCase , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(__UpperCamelCase ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase )
lowerCamelCase_ = VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase )
lowerCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type="""pt""" ) )[0]
lowerCamelCase_ = components["""vae"""]
lowerCamelCase_ = self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowerCamelCase_ = vae.encode(inputs[image_param] ).latent_dist.mode()
lowerCamelCase_ = pipe(**__UpperCamelCase )[0]
lowerCamelCase_ = np.abs(out - out_latents_inputs ).max()
self.assertLess(__UpperCamelCase , 1E-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class __A( unittest.TestCase ):
def lowercase__ ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] , __UpperCamelCase : Union[str, Any]=0 ):
lowerCamelCase_ = torch.manual_seed(__UpperCamelCase )
lowerCamelCase_ = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
lowerCamelCase_ = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self : Union[str, Any] ):
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : List[str] ):
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase )
lowerCamelCase_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : List[Any] ):
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase )
lowerCamelCase_ = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**__UpperCamelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : List[str] ):
lowerCamelCase_ = 0
def callback_fn(__UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : torch.FloatTensor ) -> None:
lowerCamelCase_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCamelCase_ = latents[0, -3:, -3:, -1]
lowerCamelCase_ = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowerCamelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCamelCase_ = latents[0, -3:, -3:, -1]
lowerCamelCase_ = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowerCamelCase_ = False
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
lowerCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase__ ( self : int ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
lowerCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**__UpperCamelCase )
lowerCamelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def lowercase__ ( self : Tuple ):
lowerCamelCase_ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ = inputs["""image"""].resize((5_0_4, 5_0_4) )
lowerCamelCase_ = """timbrooks/instruct-pix2pix"""
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__UpperCamelCase , safety_checker=__UpperCamelCase , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ = pipe(**__UpperCamelCase )
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
lowerCamelCase_ = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 272 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def lowerCamelCase ( ) -> Union[str, Any]:
_lowerCamelCase = ArgumentParser('Accelerate CLI tool' , usage='accelerate <command> [<args>]' , allow_abbrev=UpperCamelCase )
_lowerCamelCase = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=UpperCamelCase )
env_command_parser(subparsers=UpperCamelCase )
launch_command_parser(subparsers=UpperCamelCase )
tpu_command_parser(subparsers=UpperCamelCase )
test_command_parser(subparsers=UpperCamelCase )
# Let's go
_lowerCamelCase = parser.parse_args()
if not hasattr(UpperCamelCase , 'func' ):
parser.print_help()
exit(1 )
# Run
args.func(UpperCamelCase )
if __name__ == "__main__":
main() | 234 | def lowerCamelCase ( UpperCamelCase : str ) -> list:
_lowerCamelCase = [0] * len(UpperCamelCase )
for i in range(1 , len(UpperCamelCase ) ):
# use last results for better performance - dynamic programming
_lowerCamelCase = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_lowerCamelCase = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_lowerCamelCase = j
return prefix_result
def lowerCamelCase ( UpperCamelCase : str ) -> int:
return max(prefix_function(UpperCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 234 | 1 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
lowercase_ = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
lowercase_ = logging.WARNING
def lowercase ( ) -> Any:
__a = os.getenv('''DATASETS_VERBOSITY''' , lowerCAmelCase__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def lowercase ( ) -> str:
return __name__.split('''.''' )[0]
def lowercase ( ) -> logging.Logger:
return logging.getLogger(_get_library_name() )
def lowercase ( ) -> None:
# Apply our default configuration to the library root logger.
__a = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def lowercase ( ) -> None:
__a = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def lowercase ( lowerCAmelCase__ : Optional[str] = None ) -> logging.Logger:
if name is None:
__a = _get_library_name()
return logging.getLogger(lowerCAmelCase__ )
def lowercase ( ) -> int:
return _get_library_root_logger().getEffectiveLevel()
def lowercase ( lowerCAmelCase__ : int ) -> None:
_get_library_root_logger().setLevel(lowerCAmelCase__ )
def lowercase ( ) -> Tuple:
return set_verbosity(lowerCAmelCase__ )
def lowercase ( ) -> Union[str, Any]:
return set_verbosity(lowerCAmelCase__ )
def lowercase ( ) -> Optional[int]:
return set_verbosity(lowerCAmelCase__ )
def lowercase ( ) -> Optional[int]:
return set_verbosity(lowerCAmelCase__ )
def lowercase ( ) -> None:
__a = False
def lowercase ( ) -> None:
__a = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , *_a , **_a ): # pylint: disable=unused-argument
__a = args[0] if args else None
def __iter__( self ):
return iter(self._iterator )
def __getattr__( self , _a ):
def empty_fn(*_a , **_a ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ):
return self
def __exit__( self , _a , _a , _a ):
return
lowercase_ = True
class __lowerCAmelCase :
'''simple docstring'''
def __call__( self , *_a , _a=False , **_a ):
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*_a , **_a )
else:
return EmptyTqdm(*_a , **_a )
def __UpperCAmelCase ( self , *_a , **_a ):
__a = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_a , **_a )
def __UpperCAmelCase ( self ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
lowercase_ = _tqdm_cls()
def lowercase ( ) -> bool:
global _tqdm_active
return bool(_tqdm_active )
def lowercase ( ) -> Dict:
global _tqdm_active
__a = True
def lowercase ( ) -> Optional[int]:
global _tqdm_active
__a = False
| 695 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = BertTokenizer
__UpperCAmelCase : Optional[Any] = BertTokenizerFast
__UpperCAmelCase : str = True
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Any = filter_non_english
def __UpperCAmelCase ( self ):
super().setUp()
__a = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __UpperCAmelCase ( self , _a ):
__a = '''UNwant\u00E9d,running'''
__a = '''unwanted, running'''
return input_text, output_text
def __UpperCAmelCase ( self ):
__a = self.tokenizer_class(self.vocab_file )
__a = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] )
def __UpperCAmelCase ( self ):
if not self.test_rust_tokenizer:
return
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
__a = '''UNwant\u00E9d,running'''
__a = tokenizer.tokenize(_a )
__a = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
__a = self.get_rust_tokenizer()
__a = tokenizer.encode(_a )
__a = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# With lower casing
__a = self.get_tokenizer(do_lower_case=_a )
__a = self.get_rust_tokenizer(do_lower_case=_a )
__a = '''UNwant\u00E9d,running'''
__a = tokenizer.tokenize(_a )
__a = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
__a = self.get_rust_tokenizer()
__a = tokenizer.encode(_a )
__a = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer()
__a = '''a\n\'ll !!to?\'d of, can\'t.'''
__a = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(_a ) , _a )
def __UpperCAmelCase ( self ):
__a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__a = {}
for i, token in enumerate(_a ):
__a = i
__a = WordpieceTokenizer(vocab=_a , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __UpperCAmelCase ( self ):
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_a ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(_a ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def __UpperCAmelCase ( self ):
__a = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __UpperCAmelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__a = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
__a = tokenizer_r.do_lower_case if hasattr(_a , '''do_lower_case''' ) else False
__a = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __UpperCAmelCase ( self ):
__a = ['''的''', '''人''', '''有''']
__a = ''''''.join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a = True
__a = self.tokenizer_class.from_pretrained(_a , **_a )
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = tokenizer_p.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.convert_ids_to_tokens(_a )
__a = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
__a = False
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = self.tokenizer_class.from_pretrained(_a , **_a )
__a = tokenizer_r.encode(_a , add_special_tokens=_a )
__a = tokenizer_p.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.convert_ids_to_tokens(_a )
__a = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
__a = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
| 695 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = KandinskyVaaImgaImgPipeline
_a = ['image_embeds', 'negative_image_embeds', 'image']
_a = [
'image_embeds',
'negative_image_embeds',
'image',
]
_a = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_a = False
@property
def snake_case ( self : Optional[Any] )-> List[Any]:
return 32
@property
def snake_case ( self : Optional[Any] )-> int:
return 32
@property
def snake_case ( self : List[str] )-> Any:
return self.time_input_dim
@property
def snake_case ( self : Tuple )-> Dict:
return self.time_input_dim * 4
@property
def snake_case ( self : int )-> Dict:
return 100
@property
def snake_case ( self : str )-> Optional[int]:
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] ={
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowerCamelCase__ : Union[str, Any] =UNetaDConditionModel(**lowerCamelCase )
return model
@property
def snake_case ( self : Tuple )-> Optional[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case ( self : Dict )-> Any:
torch.manual_seed(0 )
lowerCamelCase__ : Dict =VQModel(**self.dummy_movq_kwargs )
return model
def snake_case ( self : Dict )-> Any:
lowerCamelCase__ : Dict =self.dummy_unet
lowerCamelCase__ : int =self.dummy_movq
lowerCamelCase__ : str ={
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00_085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
lowerCamelCase__ : Dict =DDIMScheduler(**lowerCamelCase )
lowerCamelCase__ : int ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def snake_case ( self : Optional[int], lowerCamelCase : Optional[int], lowerCamelCase : Optional[int]=0 )-> List[Any]:
lowerCamelCase__ : str =floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
lowerCamelCase__ : List[Any] =floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase )
# create init_image
lowerCamelCase__ : Optional[int] =floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
lowerCamelCase__ : Dict =image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : str =Image.fromarray(np.uinta(lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
if str(lowerCamelCase ).startswith('''mps''' ):
lowerCamelCase__ : Optional[Any] =torch.manual_seed(lowerCamelCase )
else:
lowerCamelCase__ : List[str] =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowerCamelCase__ : int ={
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def snake_case ( self : Optional[Any] )-> Optional[Any]:
lowerCamelCase__ : str ='''cpu'''
lowerCamelCase__ : str =self.get_dummy_components()
lowerCamelCase__ : List[str] =self.pipeline_class(**lowerCamelCase )
lowerCamelCase__ : Dict =pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =pipe(**self.get_dummy_inputs(lowerCamelCase ) )
lowerCamelCase__ : int =output.images
lowerCamelCase__ : Optional[Any] =pipe(
**self.get_dummy_inputs(lowerCamelCase ), return_dict=lowerCamelCase, )[0]
lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1]
lowerCamelCase__ : Optional[int] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : Any =np.array(
[0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : int )-> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Optional[int] )-> List[Any]:
lowerCamelCase__ : Any =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
lowerCamelCase__ : List[str] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowerCamelCase__ : Union[str, Any] ='''A red cartoon frog, 4k'''
lowerCamelCase__ : Union[str, Any] =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase )
lowerCamelCase__ : Any =KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''', torch_dtype=torch.floataa )
lowerCamelCase__ : List[Any] =pipeline.to(lowerCamelCase )
pipeline.set_progress_bar_config(disable=lowerCamelCase )
lowerCamelCase__ : Optional[int] =torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : int =pipe_prior(
lowerCamelCase, generator=lowerCamelCase, num_inference_steps=5, negative_prompt='''''', ).to_tuple()
lowerCamelCase__ : Optional[int] =pipeline(
image=lowerCamelCase, image_embeds=lowerCamelCase, negative_image_embeds=lowerCamelCase, generator=lowerCamelCase, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='''np''', )
lowerCamelCase__ : Tuple =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
| 625 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase : Optional[Any] = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ["CLIPFeatureExtractor"]
_lowercase : int = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 625 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
snake_case : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
snake_case : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
snake_case : Tuple = "xvjiarui/stable-diffusion-2-inpainting"
snake_case ,snake_case : Optional[Any] = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase__ , safety_checker=UpperCamelCase__ )
snake_case : str = "Face of a yellow cat, high resolution, sitting on a park bench"
snake_case : Dict = jax.random.PRNGKey(0 )
snake_case : Optional[Any] = 50
snake_case : Tuple = jax.device_count()
snake_case : Tuple = num_samples * [prompt]
snake_case : List[Any] = num_samples * [init_image]
snake_case : str = num_samples * [mask_image]
snake_case ,snake_case ,snake_case : Tuple = pipeline.prepare_inputs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# shard inputs and rng
snake_case : str = replicate(UpperCamelCase__ )
snake_case : int = jax.random.split(UpperCamelCase__ , jax.device_count() )
snake_case : Tuple = shard(UpperCamelCase__ )
snake_case : List[str] = shard(UpperCamelCase__ )
snake_case : List[str] = shard(UpperCamelCase__ )
snake_case : str = pipeline(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ )
snake_case : Optional[int] = output.images.reshape(UpperCamelCase__ , 512 , 512 , 3 )
snake_case : Dict = images[0, 253:256, 253:256, -1]
snake_case : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case : Dict = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 178 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__snake_case = ["""gpt2"""]
__snake_case = """gpt2"""
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
def __init__( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
snake_case : Dict = tokenizer
snake_case : List[str] = AutoConfig.from_pretrained(UpperCamelCase__ )
snake_case : List[str] = TFGPTaLMHeadModel.from_config(UpperCamelCase__ )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) )
def lowerCamelCase ( self , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
snake_case : List[Any] = self.tokenizer(UpperCamelCase__ )
snake_case : Tuple = tokenized["input_ids"].to_tensor()
snake_case : str = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
snake_case : int = self.model(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ )["logits"]
return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
super().setUp()
snake_case : Optional[int] = [GPTaTokenizer.from_pretrained(UpperCamelCase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
snake_case : str = [TFGPTaTokenizer.from_pretrained(UpperCamelCase__ ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
snake_case : Optional[int] = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
snake_case : str = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
snake_case : Any = tokenizer([test_inputs] , return_tensors="tf" )
snake_case : str = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
snake_case : Any = python_outputs[key].numpy()
snake_case : Any = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase__ , tf.intaa ) == tf_outputs_values ) )
@slow
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
snake_case : str = tf.function(UpperCamelCase__ )
for test_inputs in self.test_sentences:
snake_case : str = tf.constant(UpperCamelCase__ )
snake_case : Optional[int] = compiled_tokenizer(UpperCamelCase__ )
snake_case : Dict = tf_tokenizer(UpperCamelCase__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
snake_case : Tuple = ModelToSave(tokenizer=UpperCamelCase__ )
snake_case : Tuple = tf.convert_to_tensor([self.test_sentences[0]] )
snake_case : Union[str, Any] = model.serving(UpperCamelCase__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
snake_case : int = Path(UpperCamelCase__ ) / "saved.model"
tf.saved_model.save(UpperCamelCase__ , UpperCamelCase__ , signatures={"serving_default": model.serving} )
snake_case : Optional[Any] = tf.saved_model.load(UpperCamelCase__ )
snake_case : Union[str, Any] = loaded_model.signatures["serving_default"](UpperCamelCase__ )["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
snake_case : Any = tf.convert_to_tensor([self.test_sentences[0]] )
snake_case : List[str] = tf_tokenizer(UpperCamelCase__ ) # Build model with some sample inputs
snake_case : Dict = tf_tokenizer.get_config()
snake_case : Union[str, Any] = TFGPTaTokenizer.from_config(UpperCamelCase__ )
snake_case : Optional[Any] = model_from_config(UpperCamelCase__ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
snake_case : List[str] = 12_3123
for max_length in [3, 5, 1024]:
snake_case : str = tf.convert_to_tensor([self.test_sentences[0]] )
snake_case : List[Any] = tf_tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__ )
snake_case : str = out["input_ids"].numpy().shape[1]
assert out_length == max_length
| 178 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:List[Any] = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Tuple = "bert"
def __init__( self, lowerCamelCase__=3_0522, lowerCamelCase__=768, lowerCamelCase__=12, lowerCamelCase__=12, lowerCamelCase__=3072, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=512, lowerCamelCase__=2, lowerCamelCase__=0.02, lowerCamelCase__=1e-12, lowerCamelCase__=0, lowerCamelCase__="absolute", lowerCamelCase__=True, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(pad_token_id=lowerCamelCase__, **lowerCamelCase__ )
A : Optional[Any] = vocab_size
A : Optional[Any] = hidden_size
A : Union[str, Any] = num_hidden_layers
A : Optional[Any] = num_attention_heads
A : Dict = hidden_act
A : Optional[int] = intermediate_size
A : Dict = hidden_dropout_prob
A : Union[str, Any] = attention_probs_dropout_prob
A : Union[str, Any] = max_position_embeddings
A : Any = type_vocab_size
A : Any = initializer_range
A : Union[str, Any] = layer_norm_eps
A : Optional[Any] = position_embedding_type
A : Dict = use_cache
A : List[str] = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@property
def _lowerCAmelCase ( self ):
if self.task == "multiple-choice":
A : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 716 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
@staticmethod
def _lowerCAmelCase ( *lowerCamelCase__, **lowerCamelCase__ ):
pass
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
A : List[Any] = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def __UpperCamelCase ( _lowerCAmelCase ) -> Dict:
"""simple docstring"""
A : Union[str, Any] = np.array(_lowerCAmelCase )
A : str = npimg.shape
return {"hash": hashimage(_lowerCAmelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : int = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
__lowerCamelCase : Union[str, Any] = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Union[str, Any] = MaskGenerationPipeline(model=lowerCamelCase__, image_processor=lowerCamelCase__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def _lowerCAmelCase ( self ):
pass
@slow
@require_torch
def _lowerCAmelCase ( self ):
A : Tuple = pipeline("""mask-generation""", model="""facebook/sam-vit-huge""" )
A : Dict = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""", points_per_batch=256 )
# Shortening by hashing
A : Union[str, Any] = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(lowerCamelCase__, decimals=4 ), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871}
], )
# fmt: on
@require_torch
@slow
def _lowerCAmelCase ( self ):
A : Union[str, Any] = """facebook/sam-vit-huge"""
A : int = pipeline("""mask-generation""", model=lowerCamelCase__ )
A : int = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""", pred_iou_thresh=1, points_per_batch=256 )
# Shortening by hashing
A : Tuple = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(lowerCamelCase__, decimals=4 ), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
], )
| 520 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ : Dict = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
__a = XGLMTokenizer
__a = XGLMTokenizerFast
__a = True
__a = True
def UpperCamelCase_ ( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__: Union[str, Any]= XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Tuple= '''<pad>'''
SCREAMING_SNAKE_CASE__: Union[str, Any]= 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: List[Any]= list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(lowerCAmelCase ) , 1008 )
def UpperCamelCase_ ( self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: Optional[int]= XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE__: int= tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
SCREAMING_SNAKE_CASE__: str= tokenizer.convert_tokens_to_ids(lowerCAmelCase )
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE__: Union[str, Any]= tokenizer.convert_ids_to_tokens(lowerCAmelCase )
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def UpperCamelCase_ ( self ) -> List[Any]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase , f.name )
SCREAMING_SNAKE_CASE__: str= XGLMTokenizer(f.name , keep_accents=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= pickle.dumps(lowerCAmelCase )
pickle.loads(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__: List[Any]= self.get_tokenizer()
SCREAMING_SNAKE_CASE__: List[Any]= self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__: List[str]= '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE__: Tuple= tokenizer.tokenize(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__: int= tokenizer.encode(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Tuple= '''Hello World!'''
SCREAMING_SNAKE_CASE__: Optional[int]= [2, 31227, 4447, 35]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase ) )
@slow
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: List[Any]= (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
SCREAMING_SNAKE_CASE__: Dict= [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase ) )
@slow
def UpperCamelCase_ ( self ) -> str:
# fmt: off
SCREAMING_SNAKE_CASE__: Any= {
'''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name='''facebook/xglm-564M''' , padding=lowerCAmelCase , )
| 64 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88 | 0 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _UpperCamelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE:Union[str, Any] = None
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.feature_extraction_class(**self.feat_extract_dict )
a__ = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , _a )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = os.path.join(_a , 'feat_extract.json' )
feat_extract_first.to_json_file(_a )
a__ = self.feature_extraction_class.from_json_file(_a )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
a__ = self.feature_extraction_class.from_pretrained(_a )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.feature_extraction_class()
self.assertIsNotNone(_a )
| 126 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__A : Optional[int] = datasets.load_iris()
__A : Optional[Any] = np.array(data['data'])
__A : Tuple = np.array(data['target'])
__A : int = data['target_names']
__A , __A , __A , __A : Optional[Any] = train_test_split(X, y)
def lowerCAmelCase_ ( a : str , a : int ):
return np.linalg.norm(np.array(a ) - np.array(a ) )
def lowerCAmelCase_ ( a : Dict , a : str , a : List[Any] , a : Any , a : Dict=5 ):
a__ = zip(a , a )
# List of distances of all points from the point to be classified
a__ = []
for data_point in data:
a__ = euclidean_distance(data_point[0] , a )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
a__ = [i[1] for i in sorted(a )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
a__ = Counter(a ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 126 | 1 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__magic_name__ =logging.get_logger(__name__)
class _A ( snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] ="""vision-encoder-decoder"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] =True
def __init__(self , **SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"A configuraton of type {self.model_type} cannot be instantiated because "
F"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
UpperCamelCase__ = kwargs.pop('''encoder''' )
UpperCamelCase__ = encoder_config.pop('''model_type''' )
UpperCamelCase__ = kwargs.pop('''decoder''' )
UpperCamelCase__ = decoder_config.pop('''model_type''' )
UpperCamelCase__ = AutoConfig.for_model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ = AutoConfig.for_model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ = True
@classmethod
def _a (cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
UpperCamelCase__ = True
UpperCamelCase__ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **SCREAMING_SNAKE_CASE__ )
def _a (self ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = copy.deepcopy(self.__dict__ )
UpperCamelCase__ = self.encoder.to_dict()
UpperCamelCase__ = self.decoder.to_dict()
UpperCamelCase__ = self.__class__.model_type
return output
class _A ( snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] =version.parse("1.11" )
@property
def _a (self ) -> Any:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _a (self ) -> Union[str, Any]:
'''simple docstring'''
return 1E-4
@property
def _a (self ) -> int:
'''simple docstring'''
return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} )
class _A ( snake_case__ ):
@property
def _a (self ) -> Any:
'''simple docstring'''
UpperCamelCase__ = OrderedDict()
UpperCamelCase__ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
UpperCamelCase__ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
UpperCamelCase__ = {0: '''batch''', 1: '''encoder_sequence'''}
return common_inputs
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , ) -> int:
'''simple docstring'''
import torch
UpperCamelCase__ = OrderedDict()
UpperCamelCase__ = super().generate_dummy_inputs(
SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , seq_length=SCREAMING_SNAKE_CASE__ , is_pair=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ , UpperCamelCase__ = dummy_input['''input_ids'''].shape
UpperCamelCase__ = (batch, encoder_sequence, self._config.encoder_hidden_size)
UpperCamelCase__ = dummy_input.pop('''input_ids''' )
UpperCamelCase__ = dummy_input.pop('''attention_mask''' )
UpperCamelCase__ = torch.zeros(SCREAMING_SNAKE_CASE__ )
return common_inputs
class _A ( snake_case__ ):
@property
def _a (self ) -> Dict:
'''simple docstring'''
pass
def _a (self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(SCREAMING_SNAKE_CASE__ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = "default" ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 415 |
'''simple docstring'''
def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
def get_matched_characters(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
__a = []
__a = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
__a = int(max(0 , i - limit ) )
__a = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(__SCREAMING_SNAKE_CASE )
__a = F'''{_stra[0:_stra.index(__SCREAMING_SNAKE_CASE )]} {_stra[_stra.index(__SCREAMING_SNAKE_CASE ) + 1:]}'''
return "".join(__SCREAMING_SNAKE_CASE )
# matching characters
__a = get_matched_characters(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__a = get_matched_characters(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__a = len(__SCREAMING_SNAKE_CASE )
# transposition
__a = (
len([(ca, ca) for ca, ca in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if ca != ca] ) // 2
)
if not match_count:
__a = 0.0
else:
__a = (
1
/ 3
* (
match_count / len(__SCREAMING_SNAKE_CASE )
+ match_count / len(__SCREAMING_SNAKE_CASE )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
__a = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 582 | 0 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def snake_case__ ( ) -> Any:
UpperCAmelCase_ = HfArgumentParser(lowercase_ )
UpperCAmelCase_ = parser.parse_args_into_dataclasses()[0]
UpperCAmelCase_ = TensorFlowBenchmark(args=lowercase_ )
try:
UpperCAmelCase_ = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
UpperCAmelCase_ = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
UpperCAmelCase_ = """ """.join(str(lowercase_ ).split(" " )[:-1] )
UpperCAmelCase_ = """"""
UpperCAmelCase_ = eval(str(lowercase_ ).split(" " )[-1] )
UpperCAmelCase_ = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowercase_ )
if len(lowercase_ ) > 0:
UpperCAmelCase_ = full_error_msg + begin_error_msg + str(lowercase_ )
raise ValueError(lowercase_ )
benchmark.run()
if __name__ == "__main__":
main()
| 719 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> List[List[ImageInput]]:
if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = ['pixel_values']
def __init__( self , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = 1 / 255 , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(lowerCAmelCase , size["shortest_edge"] , default_to_square=lowerCAmelCase )
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowerCAmelCase , size=(size["height"], size["width"]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , ):
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = to_numpy_array(lowerCAmelCase )
if do_resize:
UpperCAmelCase_ = self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase )
if do_center_crop:
UpperCAmelCase_ = self.center_crop(lowerCAmelCase , size=lowerCAmelCase )
if do_rescale:
UpperCAmelCase_ = self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase )
if do_normalize:
UpperCAmelCase_ = self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase )
UpperCAmelCase_ = to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase )
return image
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ = make_batched(lowerCAmelCase )
UpperCAmelCase_ = [
[
self._preprocess_image(
image=lowerCAmelCase , do_resize=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , do_center_crop=lowerCAmelCase , crop_size=lowerCAmelCase , do_rescale=lowerCAmelCase , rescale_factor=lowerCAmelCase , do_normalize=lowerCAmelCase , image_mean=lowerCAmelCase , image_std=lowerCAmelCase , data_format=lowerCAmelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase_ = {"pixel_values": videos}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
| 23 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __A( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 'nat'
SCREAMING_SNAKE_CASE__ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=[3, 4, 6, 5] , SCREAMING_SNAKE_CASE_=[2, 4, 8, 16] , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=3.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ):
super().__init__(**__UpperCamelCase )
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = embed_dim
UpperCamelCase__ = depths
UpperCamelCase__ = len(__UpperCamelCase )
UpperCamelCase__ = num_heads
UpperCamelCase__ = kernel_size
UpperCamelCase__ = mlp_ratio
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = hidden_act
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase__ = int(embed_dim * 2 ** (len(__UpperCamelCase ) - 1) )
UpperCamelCase__ = layer_scale_init_value
UpperCamelCase__ = ["""stem"""] + [F"stage{idx}" for idx in range(1 , len(__UpperCamelCase ) + 1 )]
UpperCamelCase__ , UpperCamelCase__ = get_aligned_output_features_output_indices(
out_features=__UpperCamelCase , out_indices=__UpperCamelCase , stage_names=self.stage_names )
| 513 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a_ ( _UpperCAmelCase ):
def _snake_case ( self : int ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : List[Any] ) ->Any:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _snake_case ( self : Tuple ) ->int:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def _snake_case ( self : Optional[Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : List[Any] ) ->int:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def _snake_case ( self : int ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : Dict ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def _snake_case ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : Tuple ) ->Any:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def _snake_case ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _snake_case ( self : Optional[Any] ) ->Tuple:
'''simple docstring'''
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=__UpperCamelCase ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
_UpperCAmelCase ,_UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , __UpperCamelCase )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _UpperCamelCase ( _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(_A ) if isinstance(_A , pa.Buffer ) else pa.memory_map(_A )
_UpperCAmelCase = pa.ipc.open_stream(_A )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=_A , features=_A ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(_A )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(_A )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
_UpperCAmelCase = os.path.join(_A , """test.arrow""" )
with ArrowWriter(path=_A , schema=pa.schema(_A ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(_A , 1 )
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
if pa.types.is_list(_A ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
if isinstance(lst[0] , _A ):
change_first_primitive_element_in_list(lst[0] , _A )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(_A , optimized_int_type=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(_A )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(_A , _A )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=_A ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _UpperCamelCase ( _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = """mock://dataset-train.arrow"""
with ArrowWriter(path=_A , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(_A ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(_A )
def _UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_A , format="""png""" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=_A , features=Features({"""image""": Image()} ) , embed_local_files=_A ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , _A )
with open(_A , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("""col_1""" , pa.string() , nullable=_A )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=_A ) as writer:
writer._build_writer(inferred_schema=_A )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] ) | 555 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( snake_case__ , unittest.TestCase ):
lowercase = ShapEPipeline
lowercase = ['''prompt''']
lowercase = ['''prompt''']
lowercase = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
lowercase = False
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return 8
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(_A )
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : int = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
lowerCAmelCase__ : Optional[Any] = PriorTransformer(**_A )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[str] = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
lowerCAmelCase__ : List[Any] = ShapERenderer(**_A )
return model
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.dummy_prior
lowerCAmelCase__ : Optional[int] = self.dummy_text_encoder
lowerCAmelCase__ : List[Any] = self.dummy_tokenizer
lowerCAmelCase__ : str = self.dummy_renderer
lowerCAmelCase__ : List[Any] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
lowerCAmelCase__ : Any = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def _lowerCamelCase ( self : Tuple , a : Union[str, Any] , a : Tuple=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
lowerCAmelCase__ : List[Any] = torch.manual_seed(_A )
else:
lowerCAmelCase__ : Dict = torch.Generator(device=_A ).manual_seed(_A )
lowerCAmelCase__ : int = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**_A )
lowerCAmelCase__ : List[str] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
lowerCAmelCase__ : Tuple = pipe(**self.get_dummy_inputs(_A ) )
lowerCAmelCase__ : int = output.images[0]
lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowerCAmelCase__ : Any = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : Any = self.pipeline_class(**_A )
lowerCAmelCase__ : Dict = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
lowerCAmelCase__ : Any = 1
lowerCAmelCase__ : Dict = 2
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
lowerCAmelCase__ : Optional[int] = batch_size * [inputs[key]]
lowerCAmelCase__ : Optional[int] = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
lowerCAmelCase__ : Dict = ShapEPipeline.from_pretrained('openai/shap-e' )
lowerCAmelCase__ : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
lowerCAmelCase__ : str = torch.Generator(device=_A ).manual_seed(0 )
lowerCAmelCase__ : Tuple = pipe(
'a shark' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A ) | 710 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A__ ( __magic_name__ ):
def __init__( self : List[Any] , a : Callable , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[dict] = None , a : Optional[int] = None , **a : str , ):
'''simple docstring'''
super().__init__(
features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
lowerCAmelCase__ : int = Generator(
cache_dir=a , features=a , generator=a , gen_kwargs=a , **a , )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
if self.streaming:
lowerCAmelCase__ : List[Any] = self.builder.as_streaming_dataset(split='train' )
# Build regular (map-style) dataset
else:
lowerCAmelCase__ : Any = None
lowerCAmelCase__ : int = None
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : Dict = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
lowerCAmelCase__ : Union[str, Any] = self.builder.as_dataset(
split='train' , verification_mode=a , in_memory=self.keep_in_memory )
return dataset | 69 | 0 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : str = logging.get_logger(__name__)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = RobertaPreLayerNormConfig.from_pretrained(
SCREAMING_SNAKE_CASE_ , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
_SCREAMING_SNAKE_CASE = torch.load(hf_hub_download(repo_id=SCREAMING_SNAKE_CASE_ , filename="""pytorch_model.bin""" ) )
_SCREAMING_SNAKE_CASE = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
_SCREAMING_SNAKE_CASE = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
_SCREAMING_SNAKE_CASE = tensor_value
_SCREAMING_SNAKE_CASE = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ , state_dict=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
# convert tokenizer
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
UpperCamelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCamelCase__ : Optional[int] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 591 |
'''simple docstring'''
from ....utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , A__ , A__=None , A__=20_48 ) -> Tuple:
_SCREAMING_SNAKE_CASE = config.__dict__
_SCREAMING_SNAKE_CASE = modal_hidden_size
if num_labels:
_SCREAMING_SNAKE_CASE = num_labels
| 591 | 1 |
def UpperCamelCase ( _a ) -> float:
'''simple docstring'''
lowercase_ :List[str] = 0
while len(_a ) > 1:
lowercase_ :Dict = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
lowercase_ :List[str] = files.index(min(_a ) )
temp += files[min_index]
files.pop(_a )
files.append(_a )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 441 |
import math
SCREAMING_SNAKE_CASE : List[str] = 10
SCREAMING_SNAKE_CASE : Dict = 7
SCREAMING_SNAKE_CASE : int = BALLS_PER_COLOUR * NUM_COLOURS
def UpperCamelCase ( _a = 2_0 ) -> str:
'''simple docstring'''
lowercase_ :List[str] = math.comb(_a , _a )
lowercase_ :Tuple = math.comb(NUM_BALLS - BALLS_PER_COLOUR , _a )
lowercase_ :Dict = NUM_COLOURS * (1 - missing_colour / total)
return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
| 441 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE__ : Dict = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE__ : Any = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
SCREAMING_SNAKE_CASE__ : Any = '▁'
class UpperCamelCase__ (_a ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : int = BigBirdTokenizer
lowerCamelCase_ : List[Any] = ["""input_ids""", """attention_mask"""]
lowerCamelCase_ : List[int] = []
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="<unk>" , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<pad>" , UpperCamelCase__="[SEP]" , UpperCamelCase__="[MASK]" , UpperCamelCase__="[CLS]" , **UpperCamelCase__ , ) -> List[Any]:
lowerCamelCase : Union[str, Any] = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else bos_token
lowerCamelCase : List[str] = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else eos_token
lowerCamelCase : Optional[int] = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else unk_token
lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else pad_token
lowerCamelCase : Union[str, Any] = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else cls_token
lowerCamelCase : Union[str, Any] = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase : Dict = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
super().__init__(
__lowerCAmelCase , tokenizer_file=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , **__lowerCAmelCase , )
lowerCamelCase : str = vocab_file
lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[Any]:
lowerCamelCase : Any = [self.sep_token_id]
lowerCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> Tuple:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1]
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Optional[Any]:
lowerCamelCase : Optional[int] = [self.sep_token_id]
lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Optional[int]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase : int = os.path.join(
__lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ):
copyfile(self.vocab_file , __lowerCAmelCase )
return (out_vocab_file,)
| 311 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
UpperCamelCase__ = "\nimport os\n"
UpperCamelCase__ = "\ndef foo():\n import os\n return False\n"
UpperCamelCase__ = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
UpperCamelCase__ = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
UpperCamelCase__ = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
UpperCamelCase__ = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
UpperCamelCase__ = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
UpperCamelCase__ = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
UpperCamelCase__ = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
UpperCamelCase__ = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
UpperCamelCase__ = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("""case""" , a__ )
def _UpperCamelCase (a__ :Any , a__ :Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = os.path.join(a__ , """test_file.py""" )
with open(a__ , """w""" ) as _tmp_file:
_tmp_file.write(a__ )
UpperCamelCase__ = get_imports(a__ )
assert parsed_imports == ["os"]
| 619 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_:Union[str, Any] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Any = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 520 |
import re
import string
import numpy as np
import datasets
SCREAMING_SNAKE_CASE_:int = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
SCREAMING_SNAKE_CASE_:Union[str, Any] = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
SCREAMING_SNAKE_CASE_:Union[str, Any] = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"""predictions""": datasets.Value("""string""", id="""sequence""" ),
"""references""": datasets.Value("""string""", id="""sequence""" ),
} ), reference_urls=[], )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=None, lowerCamelCase__=False, lowerCamelCase__=False, lowerCamelCase__=False, ):
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
A : Optional[Any] = np.array([re.sub(lowerCamelCase__, """""", lowerCamelCase__ ) for x in predictions] )
A : str = np.array([re.sub(lowerCamelCase__, """""", lowerCamelCase__ ) for x in references] )
else:
A : Dict = np.asarray(lowerCamelCase__ )
A : List[Any] = np.asarray(lowerCamelCase__ )
if ignore_case:
A : int = np.char.lower(lowerCamelCase__ )
A : Dict = np.char.lower(lowerCamelCase__ )
if ignore_punctuation:
A : str = string.punctuation.maketrans("""""", """""", string.punctuation )
A : List[str] = np.char.translate(lowerCamelCase__, table=lowerCamelCase__ )
A : Dict = np.char.translate(lowerCamelCase__, table=lowerCamelCase__ )
if ignore_numbers:
A : str = string.digits.maketrans("""""", """""", string.digits )
A : Dict = np.char.translate(lowerCamelCase__, table=lowerCamelCase__ )
A : Tuple = np.char.translate(lowerCamelCase__, table=lowerCamelCase__ )
A : Any = predictions == references
return {"exact_match": np.mean(lowerCamelCase__ ) * 100}
| 520 | 1 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=32 , _snake_case=3 , _snake_case=4 , _snake_case=[10, 20, 30, 40] , _snake_case=[2, 2, 3, 2] , _snake_case=True , _snake_case=True , _snake_case=37 , _snake_case="gelu" , _snake_case=10 , _snake_case=0.02 , _snake_case=["stage2", "stage3", "stage4"] , _snake_case=3 , _snake_case=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = num_channels
lowerCAmelCase = num_stages
lowerCAmelCase = hidden_sizes
lowerCAmelCase = depths
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = out_features
lowerCAmelCase = num_labels
lowerCAmelCase = scope
lowerCAmelCase = num_stages
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_snake_case , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=_snake_case , loss_ignore_index=2_55 , num_labels=self.num_labels , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = UperNetForSemanticSegmentation(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
snake_case__ = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = UperNetModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ):
"""simple docstring"""
return
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_snake_case )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='UperNet does not have a base model' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='UperNet does not have a base model' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(_snake_case , _snake_case , _snake_case ):
lowerCAmelCase = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(_snake_case , _snake_case ) )
lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = _config_zero_init(_snake_case )
lowerCAmelCase = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(config=_snake_case )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip(reason='UperNet does not have tied weights' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = UperNetForSemanticSegmentation.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = hf_hub_download(
repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' )
lowerCAmelCase = Image.open(_UpperCAmelCase ).convert('RGB' )
return image
@require_torch
@require_vision
@slow
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
lowerCAmelCase = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(_snake_case )
lowerCAmelCase = prepare_img()
lowerCAmelCase = processor(images=_snake_case , return_tensors='pt' ).to(_snake_case )
with torch.no_grad():
lowerCAmelCase = model(**_snake_case )
lowerCAmelCase = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , _snake_case )
lowerCAmelCase = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _snake_case , atol=1E-4 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
lowerCAmelCase = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(_snake_case )
lowerCAmelCase = prepare_img()
lowerCAmelCase = processor(images=_snake_case , return_tensors='pt' ).to(_snake_case )
with torch.no_grad():
lowerCAmelCase = model(**_snake_case )
lowerCAmelCase = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , _snake_case )
lowerCAmelCase = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _snake_case , atol=1E-4 ) )
| 4 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__SCREAMING_SNAKE_CASE : List[Any] = True
except (ImportError, AttributeError):
__SCREAMING_SNAKE_CASE : List[Any] = object
def lowerCAmelCase_( *lowercase_ : Dict , **lowercase_ : str ) -> str:
pass
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Any = logging.get_logger('''transformers-cli/serving''')
def lowerCAmelCase_( lowercase_ : Namespace ) -> List[Any]:
_lowerCamelCase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(lowercase_ , args.host , args.port , args.workers )
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : dict
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : List[str]
lowercase__ : Optional[List[int]]
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : str
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Any
class lowerCamelCase_( A__ ):
'''simple docstring'''
@staticmethod
def snake_case__ ( lowerCamelCase__ ):
_lowerCamelCase = parser.add_parser(
'''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
serve_parser.add_argument(
'''--task''' , type=lowerCamelCase__ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
serve_parser.add_argument('''--host''' , type=lowerCamelCase__ , default='''localhost''' , help='''Interface the server will listen on.''' )
serve_parser.add_argument('''--port''' , type=lowerCamelCase__ , default=8_8_8_8 , help='''Port the serving will listen to.''' )
serve_parser.add_argument('''--workers''' , type=lowerCamelCase__ , default=1 , help='''Number of http workers''' )
serve_parser.add_argument('''--model''' , type=lowerCamelCase__ , help='''Model\'s name or path to stored model.''' )
serve_parser.add_argument('''--config''' , type=lowerCamelCase__ , help='''Model\'s config name or path to stored model.''' )
serve_parser.add_argument('''--tokenizer''' , type=lowerCamelCase__ , help='''Tokenizer name to use.''' )
serve_parser.add_argument(
'''--device''' , type=lowerCamelCase__ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
serve_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = pipeline
_lowerCamelCase = host
_lowerCamelCase = port
_lowerCamelCase = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install "transformers[serving]".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(F"""Serving model over {host}:{port}""" )
_lowerCamelCase = FastAPI(
routes=[
APIRoute(
'''/''' , self.model_info , response_model=lowerCamelCase__ , response_class=lowerCamelCase__ , methods=['''GET'''] , ),
APIRoute(
'''/tokenize''' , self.tokenize , response_model=lowerCamelCase__ , response_class=lowerCamelCase__ , methods=['''POST'''] , ),
APIRoute(
'''/detokenize''' , self.detokenize , response_model=lowerCamelCase__ , response_class=lowerCamelCase__ , methods=['''POST'''] , ),
APIRoute(
'''/forward''' , self.forward , response_model=lowerCamelCase__ , response_class=lowerCamelCase__ , methods=['''POST'''] , ),
] , timeout=6_0_0 , )
def snake_case__ ( self ):
run(self._app , host=self.host , port=self.port , workers=self.workers )
def snake_case__ ( self ):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def snake_case__ ( self , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) ):
try:
_lowerCamelCase = self._pipeline.tokenizer.tokenize(lowerCamelCase__ )
if return_ids:
_lowerCamelCase = self._pipeline.tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
return ServeTokenizeResult(tokens=lowerCamelCase__ , tokens_ids=lowerCamelCase__ )
else:
return ServeTokenizeResult(tokens=lowerCamelCase__ )
except Exception as e:
raise HTTPException(status_code=5_0_0 , detail={'''model''': '''''', '''error''': str(lowerCamelCase__ )} )
def snake_case__ ( self , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) , ):
try:
_lowerCamelCase = self._pipeline.tokenizer.decode(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return ServeDeTokenizeResult(model='''''' , text=lowerCamelCase__ )
except Exception as e:
raise HTTPException(status_code=5_0_0 , detail={'''model''': '''''', '''error''': str(lowerCamelCase__ )} )
async def snake_case__ ( self , lowerCamelCase__=Body(lowerCamelCase__ , embed=lowerCamelCase__ ) ):
# Check we don't have empty string
if len(lowerCamelCase__ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
_lowerCamelCase = self._pipeline(lowerCamelCase__ )
return ServeForwardResult(output=lowerCamelCase__ )
except Exception as e:
raise HTTPException(5_0_0 , {'''error''': str(lowerCamelCase__ )} )
| 661 | 0 |
from math import isclose, sqrt
def _snake_case (_snake_case : float , _snake_case : float , _snake_case : float) -> tuple[float, float, float]:
_lowercase =point_y / 4 / point_x
_lowercase =2 * normal_gradient / (1 + normal_gradient * normal_gradient)
_lowercase =(1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
_lowercase =(sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
_lowercase =outgoing_gradient**2 + 4
_lowercase =2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
_lowercase =(point_y - outgoing_gradient * point_x) ** 2 - 100
_lowercase =(
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
) / (2 * quadratic_term)
_lowercase =(
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
) / (2 * quadratic_term)
# two solutions, one of which is our input point
_lowercase =x_minus if isclose(_snake_case , _snake_case) else x_plus
_lowercase =point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def _snake_case (_snake_case : float = 1.4 , _snake_case : float = -9.6) -> int:
_lowercase =0
_lowercase =first_x_coord
_lowercase =first_y_coord
_lowercase =(10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
_lowercase , _lowercase , _lowercase =next_point(_snake_case , _snake_case , _snake_case)
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
| 710 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self :List[Any], snake_case :Optional[Any], snake_case :int=13, snake_case :Optional[Any]=30, snake_case :Any=2, snake_case :Union[str, Any]=3, snake_case :Union[str, Any]=True, snake_case :List[str]=True, snake_case :List[str]=32, snake_case :Any=2, snake_case :Optional[Any]=4, snake_case :Any=37, snake_case :Tuple="gelu", snake_case :Union[str, Any]=0.1, snake_case :List[str]=0.1, snake_case :Tuple=10, snake_case :Optional[int]=0.0_2, snake_case :str=3, snake_case :Dict=None, ):
"""simple docstring"""
_lowercase =parent
_lowercase =batch_size
_lowercase =image_size
_lowercase =patch_size
_lowercase =num_channels
_lowercase =is_training
_lowercase =use_labels
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =type_sequence_label_size
_lowercase =initializer_range
_lowercase =scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowercase =(image_size // patch_size) ** 2
_lowercase =num_patches + 1
def UpperCamelCase__ ( self :str):
"""simple docstring"""
_lowercase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowercase =None
if self.use_labels:
_lowercase =ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowercase =self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self :str):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=snake_case, initializer_range=self.initializer_range, )
def UpperCamelCase__ ( self :Dict, snake_case :Any, snake_case :str, snake_case :Union[str, Any]):
"""simple docstring"""
_lowercase =TFViTModel(config=snake_case)
_lowercase =model(snake_case, training=snake_case)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
# Test with an image with different size than the one specified in config.
_lowercase =self.image_size // 2
_lowercase =pixel_values[:, :, :image_size, :image_size]
_lowercase =model(snake_case, interpolate_pos_encoding=snake_case, training=snake_case)
_lowercase =(image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))
def UpperCamelCase__ ( self :Any, snake_case :List[Any], snake_case :Optional[int], snake_case :Dict):
"""simple docstring"""
_lowercase =self.type_sequence_label_size
_lowercase =TFViTForImageClassification(snake_case)
_lowercase =model(snake_case, labels=snake_case, training=snake_case)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
# Test with an image with different size than the one specified in config.
_lowercase =self.image_size // 2
_lowercase =pixel_values[:, :, :image_size, :image_size]
_lowercase =model(snake_case, interpolate_pos_encoding=snake_case, training=snake_case)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_lowercase =1
_lowercase =TFViTForImageClassification(snake_case)
_lowercase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_lowercase =model(snake_case)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
_lowercase =self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase =config_and_inputs
_lowercase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( _a , _a , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : Dict =(TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
__lowerCAmelCase : List[str] =(
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
__lowerCAmelCase : List[Any] =False
__lowerCAmelCase : Any =False
__lowerCAmelCase : List[Any] =False
def UpperCamelCase__ ( self :str):
"""simple docstring"""
_lowercase =TFViTModelTester(self)
_lowercase =ConfigTester(self, config_class=snake_case, has_text_modality=snake_case, hidden_size=37)
def UpperCamelCase__ ( self :int):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def UpperCamelCase__ ( self :str):
"""simple docstring"""
pass
@unittest.skip(reason='ViT does not use inputs_embeds')
def UpperCamelCase__ ( self :int):
"""simple docstring"""
pass
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase =model_class(snake_case)
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
_lowercase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case, tf.keras.layers.Layer))
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase =model_class(snake_case)
_lowercase =inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase =[*signature.parameters.keys()]
_lowercase =['pixel_values']
self.assertListEqual(arg_names[:1], snake_case)
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case)
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case)
@slow
def UpperCamelCase__ ( self :List[Any]):
"""simple docstring"""
_lowercase =TFViTModel.from_pretrained('google/vit-base-patch16-224')
self.assertIsNotNone(snake_case)
def _snake_case () -> Any:
_lowercase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def UpperCamelCase__ ( self :int):
"""simple docstring"""
_lowercase =TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224')
_lowercase =self.default_image_processor
_lowercase =prepare_img()
_lowercase =image_processor(images=snake_case, return_tensors='tf')
# forward pass
_lowercase =model(**snake_case)
# verify the logits
_lowercase =tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape, snake_case)
_lowercase =tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6])
tf.debugging.assert_near(outputs.logits[0, :3], snake_case, atol=1e-4)
| 557 | 0 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__ ( _lowerCAmelCase ):
def __init__( self : int , UpperCamelCase_ : AutoencoderKL , UpperCamelCase_ : CLIPTextModel , UpperCamelCase_ : CLIPTokenizer , UpperCamelCase_ : UNetaDConditionModel , UpperCamelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCamelCase_ : StableDiffusionSafetyChecker , UpperCamelCase_ : CLIPImageProcessor , ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , )
def __UpperCamelCase ( self : str , UpperCamelCase_ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ : Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase_ )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
self.enable_attention_slicing(UpperCamelCase_ )
@torch.no_grad()
def __call__( self : str , UpperCamelCase_ : Union[str, List[str]] , UpperCamelCase_ : int = 512 , UpperCamelCase_ : int = 512 , UpperCamelCase_ : int = 50 , UpperCamelCase_ : float = 7.5 , UpperCamelCase_ : Optional[Union[str, List[str]]] = None , UpperCamelCase_ : Optional[int] = 1 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : Optional[torch.Generator] = None , UpperCamelCase_ : Optional[torch.FloatTensor] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[torch.FloatTensor] = None , **UpperCamelCase_ : List[Any] , ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCamelCase_ : Dict = 1
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCamelCase_ : List[Any] = len(UpperCamelCase_ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(UpperCamelCase_ )}.""" )
# get prompt text embeddings
lowerCamelCase_ : int = self.tokenizer(
UpperCamelCase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
lowerCamelCase_ : int = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase_ : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowerCamelCase_ : str = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowerCamelCase_ : Any = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Tuple = text_embeddings.shape
lowerCamelCase_ : List[Any] = text_embeddings.repeat(1 , UpperCamelCase_ , 1 )
lowerCamelCase_ : Tuple = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCamelCase_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ : List[str]
if negative_prompt is None:
lowerCamelCase_ : Dict = ['''''']
elif type(UpperCamelCase_ ) is not type(UpperCamelCase_ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(UpperCamelCase_ )} !="""
F""" {type(UpperCamelCase_ )}.""" )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCamelCase_ : List[str] = [negative_prompt]
elif batch_size != len(UpperCamelCase_ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase_ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
lowerCamelCase_ : List[str] = negative_prompt
lowerCamelCase_ : List[Any] = text_input_ids.shape[-1]
lowerCamelCase_ : str = self.tokenizer(
UpperCamelCase_ , padding='''max_length''' , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors='''pt''' , )
lowerCamelCase_ : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ : Optional[int] = uncond_embeddings.shape[1]
lowerCamelCase_ : int = uncond_embeddings.repeat(UpperCamelCase_ , UpperCamelCase_ , 1 )
lowerCamelCase_ : List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCamelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ : Union[str, Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ : Dict = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowerCamelCase_ : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase_ : int = torch.randn(
UpperCamelCase_ , generator=UpperCamelCase_ , device='''cpu''' , dtype=UpperCamelCase_ ).to(self.device )
lowerCamelCase_ : List[str] = torch.randn(UpperCamelCase_ , generator=UpperCamelCase_ , device='''cpu''' , dtype=UpperCamelCase_ ).to(
self.device )
else:
lowerCamelCase_ : Tuple = torch.randn(
UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=UpperCamelCase_ )
lowerCamelCase_ : List[str] = torch.randn(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=UpperCamelCase_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowerCamelCase_ : List[str] = latents_reference.to(self.device )
lowerCamelCase_ : Any = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowerCamelCase_ : Tuple = (latents_shape[3] - latents_shape_reference[3]) // 2
lowerCamelCase_ : Dict = (latents_shape[2] - latents_shape_reference[2]) // 2
lowerCamelCase_ : List[str] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowerCamelCase_ : int = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowerCamelCase_ : Any = 0 if dx < 0 else dx
lowerCamelCase_ : List[Any] = 0 if dy < 0 else dy
lowerCamelCase_ : List[str] = max(-dx , 0 )
lowerCamelCase_ : Optional[int] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
lowerCamelCase_ : int = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(UpperCamelCase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCamelCase_ : Any = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ : Tuple = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ : List[str] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ : Optional[int] = {}
if accepts_eta:
lowerCamelCase_ : int = eta
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ : Any = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
# predict the noise residual
lowerCamelCase_ : Dict = self.unet(UpperCamelCase_ , UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] = noise_pred.chunk(2 )
lowerCamelCase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ : str = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : int = 1 / 0.1_8215 * latents
lowerCamelCase_ : Dict = self.vae.decode(UpperCamelCase_ ).sample
lowerCamelCase_ : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase_ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowerCamelCase_ : int = self.feature_extractor(self.numpy_to_pil(UpperCamelCase_ ) , return_tensors='''pt''' ).to(
self.device )
lowerCamelCase_ , lowerCamelCase_ : Tuple = self.safety_checker(
images=UpperCamelCase_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowerCamelCase_ : Optional[int] = None
if output_type == "pil":
lowerCamelCase_ : Union[str, Any] = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=UpperCamelCase_ , nsfw_content_detected=UpperCamelCase_ )
| 501 |
'''simple docstring'''
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__lowerCamelCase : Dict = logging.getLogger(__name__)
class lowerCAmelCase__ ( _lowerCAmelCase ):
A = "token-classification"
def __init__( self : Tuple , UpperCamelCase_ : Tuple ) -> Dict:
"""simple docstring"""
if type(UpperCamelCase_ ) == dict:
lowerCamelCase_ : Tuple = Namespace(**UpperCamelCase_ )
lowerCamelCase_ : Any = import_module('''tasks''' )
try:
lowerCamelCase_ : Any = getattr(UpperCamelCase_ , hparams.task_type )
lowerCamelCase_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
lowerCamelCase_ : int = self.token_classification_task.get_labels(hparams.labels )
lowerCamelCase_ : Optional[Any] = CrossEntropyLoss().ignore_index
super().__init__(UpperCamelCase_ , len(self.labels ) , self.mode )
def __UpperCamelCase ( self : Dict , **UpperCamelCase_ : List[str] ) -> Dict:
"""simple docstring"""
return self.model(**UpperCamelCase_ )
def __UpperCamelCase ( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Dict = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
lowerCamelCase_ : str = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
lowerCamelCase_ : List[str] = self(**UpperCamelCase_ )
lowerCamelCase_ : int = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : str = self.hparams
for mode in ["train", "dev", "test"]:
lowerCamelCase_ : List[Any] = self._feature_file(UpperCamelCase_ )
if os.path.exists(UpperCamelCase_ ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , UpperCamelCase_ )
lowerCamelCase_ : Dict = torch.load(UpperCamelCase_ )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
lowerCamelCase_ : Optional[int] = self.token_classification_task.read_examples_from_file(args.data_dir , UpperCamelCase_ )
lowerCamelCase_ : int = self.token_classification_task.convert_examples_to_features(
UpperCamelCase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=UpperCamelCase_ , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , UpperCamelCase_ )
torch.save(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCamelCase ( self : str , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : bool = False ) -> DataLoader:
"""simple docstring"""
lowerCamelCase_ : Dict = self._feature_file(UpperCamelCase_ )
logger.info('''Loading features from cached file %s''' , UpperCamelCase_ )
lowerCamelCase_ : Optional[Any] = torch.load(UpperCamelCase_ )
lowerCamelCase_ : List[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCamelCase_ : Union[str, Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowerCamelCase_ : Optional[int] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowerCamelCase_ : Optional[int] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowerCamelCase_ : Optional[Any] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , batch_size=UpperCamelCase_ )
def __UpperCamelCase ( self : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str ) -> Any:
"""simple docstring"""
"""Compute validation""" ""
lowerCamelCase_ : Optional[Any] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
lowerCamelCase_ : Any = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
lowerCamelCase_ : List[Any] = self(**UpperCamelCase_ )
lowerCamelCase_ , lowerCamelCase_ : List[Any] = outputs[:2]
lowerCamelCase_ : List[Any] = logits.detach().cpu().numpy()
lowerCamelCase_ : str = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase ( self : Tuple , UpperCamelCase_ : str ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ : Tuple = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
lowerCamelCase_ : Dict = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
lowerCamelCase_ : Optional[Any] = np.argmax(UpperCamelCase_ , axis=2 )
lowerCamelCase_ : List[str] = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
lowerCamelCase_ : List[str] = dict(enumerate(self.labels ) )
lowerCamelCase_ : Optional[Any] = [[] for _ in range(out_label_ids.shape[0] )]
lowerCamelCase_ : str = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowerCamelCase_ : Optional[Any] = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(UpperCamelCase_ , UpperCamelCase_ ),
'''precision''': precision_score(UpperCamelCase_ , UpperCamelCase_ ),
'''recall''': recall_score(UpperCamelCase_ , UpperCamelCase_ ),
'''f1''': fa_score(UpperCamelCase_ , UpperCamelCase_ ),
}
lowerCamelCase_ : Union[str, Any] = dict(results.items() )
lowerCamelCase_ : List[str] = results
return ret, preds_list, out_label_list
def __UpperCamelCase ( self : Optional[Any] , UpperCamelCase_ : Tuple ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Any = self._eval_end(UpperCamelCase_ )
lowerCamelCase_ : Tuple = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase ( self : Dict , UpperCamelCase_ : Tuple ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] = self._eval_end(UpperCamelCase_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowerCamelCase_ : Optional[int] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
BaseTransformer.add_model_specific_args(UpperCamelCase_ , UpperCamelCase_ )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=UpperCamelCase_ , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=UpperCamelCase_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=UpperCamelCase_ , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=UpperCamelCase_ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__lowerCamelCase : Optional[int] = NERTransformer.add_model_specific_args(parser, os.getcwd())
__lowerCamelCase : Any = parser.parse_args()
__lowerCamelCase : List[Any] = NERTransformer(args)
__lowerCamelCase : Any = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__lowerCamelCase : Any = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
__lowerCamelCase : List[Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 501 | 1 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def _lowerCamelCase ( ):
_lowerCAmelCase = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
_lowerCAmelCase = Dataset.from_dict(snake_case )
return dataset
class lowerCamelCase__ ( UpperCAmelCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
_lowerCAmelCase = get_dataset()
_lowerCAmelCase = make_duplicate_clusters(lowercase__ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def SCREAMING_SNAKE_CASE__ ( self : int ):
_lowerCAmelCase = get_dataset()
_lowerCAmelCase , _lowerCAmelCase = deduplicate_dataset(lowercase__ )
self.assertEqual(len(lowercase__ ) , 2 )
print(lowercase__ )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , lowercase__ )
| 225 | import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def _lowerCamelCase ( snake_case ):
_lowerCAmelCase = torch.exp(snake_case )
_lowerCAmelCase = torch.sum(snake_case , dim=1 ) # sum of exp(x_i)
_lowerCAmelCase = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(snake_case ) - B / A
class lowerCamelCase__ ( nn.Module ):
def __init__( self : str , lowercase__ : List[str] ):
super().__init__()
_lowerCAmelCase = config.output_attentions
_lowerCAmelCase = config.output_hidden_states
_lowerCAmelCase = nn.ModuleList([BertLayer(lowercase__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase = nn.ModuleList([BertHighway(lowercase__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase = [-1 for _ in range(config.num_hidden_layers )]
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : Any ):
if (type(lowercase__ ) is float) or (type(lowercase__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCAmelCase = x
else:
_lowerCAmelCase = x
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : str ):
_lowerCAmelCase = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : Any , lowercase__ : Optional[Any]=None , lowercase__ : List[str]=None , lowercase__ : str=None , lowercase__ : Optional[Any]=None , ):
_lowerCAmelCase = ()
_lowerCAmelCase = ()
_lowerCAmelCase = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase = all_hidden_states + (hidden_states,)
_lowerCAmelCase = layer_module(
lowercase__ , lowercase__ , head_mask[i] , lowercase__ , lowercase__ )
_lowerCAmelCase = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase = all_attentions + (layer_outputs[1],)
_lowerCAmelCase = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase = current_outputs + (all_attentions,)
_lowerCAmelCase = self.highway[i](lowercase__ )
# logits, pooled_output
if not self.training:
_lowerCAmelCase = highway_exit[0]
_lowerCAmelCase = entropy(lowercase__ )
_lowerCAmelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(lowercase__ , i + 1 )
else:
_lowerCAmelCase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase = all_hidden_states + (hidden_states,)
_lowerCAmelCase = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase = outputs + (all_attentions,)
_lowerCAmelCase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " ,UpperCAmelCase ,)
class lowerCamelCase__ ( UpperCAmelCase ):
def __init__( self : Optional[int] , lowercase__ : List[Any] ):
super().__init__(lowercase__ )
_lowerCAmelCase = config
_lowerCAmelCase = BertEmbeddings(lowercase__ )
_lowerCAmelCase = DeeBertEncoder(lowercase__ )
_lowerCAmelCase = BertPooler(lowercase__ )
self.init_weights()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
self.encoder.init_highway_pooler(self.pooler )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
return self.embeddings.word_embeddings
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : List[Any] ):
_lowerCAmelCase = value
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , lowercase__ : List[str] ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(lowercase__ )
@add_start_docstrings_to_model_forward(lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : int=None , lowercase__ : Union[str, Any]=None , lowercase__ : str=None , lowercase__ : Any=None , lowercase__ : int=None , lowercase__ : Optional[int]=None , lowercase__ : Any=None , lowercase__ : int=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowerCAmelCase = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowerCAmelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase = torch.ones(lowercase__ , device=lowercase__ )
if encoder_attention_mask is None:
_lowerCAmelCase = torch.ones(lowercase__ , device=lowercase__ )
if token_type_ids is None:
_lowerCAmelCase = torch.zeros(lowercase__ , dtype=torch.long , device=lowercase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase = self.get_extended_attention_mask(lowercase__ , lowercase__ , lowercase__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase = self.get_head_mask(lowercase__ , self.config.num_hidden_layers )
_lowerCAmelCase = self.embeddings(
input_ids=lowercase__ , position_ids=lowercase__ , token_type_ids=lowercase__ , inputs_embeds=lowercase__ )
_lowerCAmelCase = self.encoder(
lowercase__ , attention_mask=lowercase__ , head_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , )
_lowerCAmelCase = encoder_outputs[0]
_lowerCAmelCase = self.pooler(lowercase__ )
_lowerCAmelCase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowerCamelCase__ ( UpperCAmelCase ):
def __init__( self : List[Any] , lowercase__ : int , lowercase__ : Dict ):
_lowerCAmelCase = message
_lowerCAmelCase = exit_layer # start from 1!
class lowerCamelCase__ ( nn.Module ):
def __init__( self : int , lowercase__ : Optional[Any] ):
super().__init__()
_lowerCAmelCase = BertPooler(lowercase__ )
_lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase = nn.Linear(config.hidden_size , config.num_labels )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : Dict ):
# Pooler
_lowerCAmelCase = encoder_outputs[0]
_lowerCAmelCase = self.pooler(lowercase__ )
# "return" pooler_output
# BertModel
_lowerCAmelCase = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCAmelCase = bmodel_output[1]
_lowerCAmelCase = self.dropout(lowercase__ )
_lowerCAmelCase = self.classifier(lowercase__ )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " ,UpperCAmelCase ,)
class lowerCamelCase__ ( UpperCAmelCase ):
def __init__( self : Union[str, Any] , lowercase__ : Any ):
super().__init__(lowercase__ )
_lowerCAmelCase = config.num_labels
_lowerCAmelCase = config.num_hidden_layers
_lowerCAmelCase = DeeBertModel(lowercase__ )
_lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict , lowercase__ : Dict=None , lowercase__ : int=None , lowercase__ : Union[str, Any]=None , lowercase__ : Optional[Any]=None , lowercase__ : List[Any]=None , lowercase__ : Optional[Any]=None , lowercase__ : Tuple=None , lowercase__ : Optional[int]=-1 , lowercase__ : Optional[int]=False , ):
_lowerCAmelCase = self.num_layers
try:
_lowerCAmelCase = self.bert(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , position_ids=lowercase__ , head_mask=lowercase__ , inputs_embeds=lowercase__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase = outputs[1]
_lowerCAmelCase = self.dropout(lowercase__ )
_lowerCAmelCase = self.classifier(lowercase__ )
_lowerCAmelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase = e.message
_lowerCAmelCase = e.exit_layer
_lowerCAmelCase = outputs[0]
if not self.training:
_lowerCAmelCase = entropy(lowercase__ )
_lowerCAmelCase = []
_lowerCAmelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase = MSELoss()
_lowerCAmelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase = CrossEntropyLoss()
_lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase = []
for highway_exit in outputs[-1]:
_lowerCAmelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(lowercase__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase = MSELoss()
_lowerCAmelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase = CrossEntropyLoss()
_lowerCAmelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(lowercase__ )
if train_highway:
_lowerCAmelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase = (loss,) + outputs
if not self.training:
_lowerCAmelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 225 | 1 |
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : Union[str, Any] = '''▁'''
lowerCAmelCase_ : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class UpperCamelCase_ ( _lowercase , unittest.TestCase ):
_A : Optional[int] = BertGenerationTokenizer
_A : List[Any] = False
_A : Union[str, Any] = True
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
super().setUp()
UpperCAmelCase = BertGenerationTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = """<s>"""
UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_02 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = BertGenerationTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
UpperCAmelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [2_85, 46, 10, 1_70, 3_82] , )
UpperCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = """Hello World!"""
UpperCAmelCase = [1_85_36, 22_60, 1_01]
self.assertListEqual(_SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@slow
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
UpperCAmelCase = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@require_torch
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
UpperCAmelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase = """ """.join(_SCREAMING_SNAKE_CASE )
UpperCAmelCase = self.big_tokenizer.encode_plus(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , return_token_type_ids=_SCREAMING_SNAKE_CASE )
UpperCAmelCase = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=_SCREAMING_SNAKE_CASE )
UpperCAmelCase = BertGenerationConfig()
UpperCAmelCase = BertGenerationEncoder(_SCREAMING_SNAKE_CASE )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_SCREAMING_SNAKE_CASE )
model(**_SCREAMING_SNAKE_CASE )
@slow
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = {"""input_ids""": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 673 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
__UpperCamelCase = jnp.ones((batch_size, length) ) / length
return scores
def __lowercase( self ) -> Optional[Any]:
__UpperCamelCase = None
__UpperCamelCase = 20
__UpperCamelCase = self._get_uniform_logits(batch_size=2 , length=_SCREAMING_SNAKE_CASE )
# tweak scores to not be uniform anymore
__UpperCamelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__UpperCamelCase = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__UpperCamelCase = jax.nn.softmax(_SCREAMING_SNAKE_CASE , axis=-1 )
__UpperCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__UpperCamelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
__UpperCamelCase = jax.nn.softmax(temp_dist_warper_sharper(_SCREAMING_SNAKE_CASE , scores.copy() , cur_len=_SCREAMING_SNAKE_CASE ) , axis=-1 )
__UpperCamelCase = jax.nn.softmax(temp_dist_warper_smoother(_SCREAMING_SNAKE_CASE , scores.copy() , cur_len=_SCREAMING_SNAKE_CASE ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def __lowercase( self ) -> int:
__UpperCamelCase = None
__UpperCamelCase = 10
__UpperCamelCase = 2
# create ramp distribution
__UpperCamelCase = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy()
__UpperCamelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
__UpperCamelCase = FlaxTopKLogitsWarper(3 )
__UpperCamelCase = top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__UpperCamelCase = 5
__UpperCamelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__UpperCamelCase = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, length) ).copy()
__UpperCamelCase = top_k_warp_safety_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def __lowercase( self ) -> List[str]:
__UpperCamelCase = None
__UpperCamelCase = 10
__UpperCamelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__UpperCamelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
__UpperCamelCase = FlaxTopPLogitsWarper(0.8 )
__UpperCamelCase = np.exp(top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__UpperCamelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# check edge cases with negative and extreme logits
__UpperCamelCase = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__UpperCamelCase = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
__UpperCamelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__UpperCamelCase = top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def __lowercase( self ) -> Union[str, Any]:
__UpperCamelCase = 20
__UpperCamelCase = 4
__UpperCamelCase = 0
__UpperCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
# check that min length is applied at length 5
__UpperCamelCase = ids_tensor((batch_size, 20) , vocab_size=20 )
__UpperCamelCase = 5
__UpperCamelCase = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = min_dist_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] )
# check that min length is not applied anymore at length 15
__UpperCamelCase = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = 15
__UpperCamelCase = min_dist_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def __lowercase( self ) -> List[str]:
__UpperCamelCase = 20
__UpperCamelCase = 4
__UpperCamelCase = 0
__UpperCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the bos_token_id score
__UpperCamelCase = ids_tensor((batch_size, 1) , vocab_size=20 )
__UpperCamelCase = 1
__UpperCamelCase = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
__UpperCamelCase = 3
__UpperCamelCase = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def __lowercase( self ) -> Any:
__UpperCamelCase = 20
__UpperCamelCase = 4
__UpperCamelCase = 0
__UpperCamelCase = 5
__UpperCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the eos_token_id when max_length is reached
__UpperCamelCase = ids_tensor((batch_size, 4) , vocab_size=20 )
__UpperCamelCase = 4
__UpperCamelCase = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__UpperCamelCase = 3
__UpperCamelCase = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def __lowercase( self ) -> Dict:
__UpperCamelCase = 4
__UpperCamelCase = 10
__UpperCamelCase = 15
__UpperCamelCase = 2
__UpperCamelCase = 1
__UpperCamelCase = 15
# dummy input_ids and scores
__UpperCamelCase = ids_tensor((batch_size, sequence_length) , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = input_ids.copy()
__UpperCamelCase = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = scores.copy()
# instantiate all dist processors
__UpperCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__UpperCamelCase = FlaxTopKLogitsWarper(3 )
__UpperCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__UpperCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = 10
# no processor list
__UpperCamelCase = temp_dist_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = min_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = bos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = eos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# with processor list
__UpperCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__UpperCamelCase = processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def __lowercase( self ) -> Dict:
__UpperCamelCase = 4
__UpperCamelCase = 10
__UpperCamelCase = 15
__UpperCamelCase = 2
__UpperCamelCase = 1
__UpperCamelCase = 15
# dummy input_ids and scores
__UpperCamelCase = ids_tensor((batch_size, sequence_length) , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = input_ids.copy()
__UpperCamelCase = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = scores.copy()
# instantiate all dist processors
__UpperCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__UpperCamelCase = FlaxTopKLogitsWarper(3 )
__UpperCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__UpperCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = 10
# no processor list
def run_no_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__UpperCamelCase = temp_dist_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = min_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = bos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = eos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
return scores
# with processor list
def run_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__UpperCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__UpperCamelCase = processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
return scores
__UpperCamelCase = jax.jit(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = jax.jit(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = jitted_run_no_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = jitted_run_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 383 | 0 |
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = {
'facebook/mask2former-swin-small-coco-instance': (
'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
lowercase_ = logging.get_logger(__name__)
class snake_case ( _UpperCAmelCase ):
'''simple docstring'''
A_ : List[Any] = """mask2former"""
A_ : List[Any] = ["""swin"""]
A_ : Any = {"""hidden_size""": """hidden_dim"""}
def __init__( self : List[Any], _lowerCamelCase : Any = None, _lowerCamelCase : Union[str, Any] = 2_56, _lowerCamelCase : str = 2_56, _lowerCamelCase : List[Any] = 2_56, _lowerCamelCase : Tuple = 10_24, _lowerCamelCase : Union[str, Any] = "relu", _lowerCamelCase : int = 6, _lowerCamelCase : List[Any] = 10, _lowerCamelCase : List[Any] = 8, _lowerCamelCase : int = 0.0, _lowerCamelCase : str = 20_48, _lowerCamelCase : Union[str, Any] = False, _lowerCamelCase : str = False, _lowerCamelCase : List[str] = 4, _lowerCamelCase : Optional[int] = 2_55, _lowerCamelCase : Optional[int] = 1_00, _lowerCamelCase : Union[str, Any] = 0.1, _lowerCamelCase : List[str] = 2.0, _lowerCamelCase : Dict = 5.0, _lowerCamelCase : Optional[int] = 5.0, _lowerCamelCase : Union[str, Any] = 1_25_44, _lowerCamelCase : List[Any] = 3.0, _lowerCamelCase : Optional[Any] = 0.75, _lowerCamelCase : str = 0.02, _lowerCamelCase : Dict = 1.0, _lowerCamelCase : Union[str, Any] = True, _lowerCamelCase : List[str] = [4, 8, 16, 32], _lowerCamelCase : Union[str, Any] = None, **_lowerCamelCase : Optional[int], ):
'''simple docstring'''
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
__A = CONFIG_MAPPING["""swin"""](
image_size=2_24, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=lowercase__, out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''], )
if isinstance(lowercase__, lowercase__ ):
__A = backbone_config.pop('''model_type''' )
__A = CONFIG_MAPPING[backbone_model_type]
__A = config_class.from_dict(lowercase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
f'Supported model types: {",".join(self.backbones_supported )}' )
__A = backbone_config
__A = feature_size
__A = mask_feature_size
__A = hidden_dim
__A = encoder_feedforward_dim
__A = activation_function
__A = encoder_layers
__A = decoder_layers
__A = num_attention_heads
__A = dropout
__A = dim_feedforward
__A = pre_norm
__A = enforce_input_projection
__A = common_stride
__A = ignore_value
__A = num_queries
__A = no_object_weight
__A = class_weight
__A = mask_weight
__A = dice_weight
__A = train_num_points
__A = oversample_ratio
__A = importance_sample_ratio
__A = init_std
__A = init_xavier_std
__A = use_auxiliary_loss
__A = feature_strides
__A = output_auxiliary_logits
__A = decoder_layers
super().__init__(**lowercase__ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any], _lowerCamelCase : Any, **_lowerCamelCase : Dict ):
'''simple docstring'''
return cls(
backbone_config=lowercase__, **lowercase__, )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = copy.deepcopy(self.__dict__ )
__A = self.backbone_config.to_dict()
__A = self.__class__.model_type
return output
| 704 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case :
'''simple docstring'''
def __init__( self : Optional[int], _lowerCamelCase : Dict, _lowerCamelCase : Optional[Any]=13, _lowerCamelCase : str=32, _lowerCamelCase : str=2, _lowerCamelCase : str=3, _lowerCamelCase : Optional[Any]=16, _lowerCamelCase : int=[32, 64, 1_28], _lowerCamelCase : Dict=[1, 2, 1], _lowerCamelCase : Union[str, Any]=[2, 2, 4], _lowerCamelCase : Any=2, _lowerCamelCase : List[str]=2.0, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : Dict=0.0, _lowerCamelCase : Optional[int]=0.0, _lowerCamelCase : str=0.1, _lowerCamelCase : Optional[int]="gelu", _lowerCamelCase : int=False, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : str=0.02, _lowerCamelCase : Tuple=1e-5, _lowerCamelCase : List[str]=True, _lowerCamelCase : List[str]=None, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : Optional[Any]=10, _lowerCamelCase : str=8, _lowerCamelCase : Optional[int]=["stage1", "stage2"], _lowerCamelCase : Optional[int]=[1, 2], ):
'''simple docstring'''
__A = parent
__A = batch_size
__A = image_size
__A = patch_size
__A = num_channels
__A = embed_dim
__A = hidden_sizes
__A = depths
__A = num_heads
__A = window_size
__A = mlp_ratio
__A = qkv_bias
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = drop_path_rate
__A = hidden_act
__A = use_absolute_embeddings
__A = patch_norm
__A = layer_norm_eps
__A = initializer_range
__A = is_training
__A = scope
__A = use_labels
__A = type_sequence_label_size
__A = encoder_stride
__A = out_features
__A = out_indices
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size], self.type_sequence_label_size )
__A = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[Any], _lowerCamelCase : Any, _lowerCamelCase : int ):
'''simple docstring'''
__A = FocalNetModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = model(_lowerCamelCase )
__A = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__A = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) )
def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : str, _lowerCamelCase : List[Any], _lowerCamelCase : List[Any] ):
'''simple docstring'''
__A = FocalNetBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__A = None
__A = FocalNetBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : List[str], _lowerCamelCase : int, _lowerCamelCase : Optional[int] ):
'''simple docstring'''
__A = FocalNetForMaskedImageModeling(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = model(_lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__A = 1
__A = FocalNetForMaskedImageModeling(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A = model(_lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Optional[int], _lowerCamelCase : str, _lowerCamelCase : List[Any] ):
'''simple docstring'''
__A = self.type_sequence_label_size
__A = FocalNetForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = model(_lowerCamelCase, labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__A = 1
__A = FocalNetForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = self.prepare_config_and_inputs()
__A , __A , __A = config_and_inputs
__A = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ : int = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
A_ : Dict = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
A_ : Union[str, Any] = False
A_ : Optional[int] = False
A_ : Union[str, Any] = False
A_ : List[Any] = False
A_ : Optional[int] = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = FocalNetModelTester(self )
__A = ConfigTester(self, config_class=_lowerCamelCase, embed_dim=37, has_text_modality=_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
return
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__A = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
__A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase, nn.Linear ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__A = model_class(_lowerCamelCase )
__A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ['''pixel_values''']
self.assertListEqual(arg_names[:1], _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Any, _lowerCamelCase : Any, _lowerCamelCase : List[Any], _lowerCamelCase : List[Any] ):
'''simple docstring'''
__A = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(_lowerCamelCase, _lowerCamelCase ) )
__A = outputs.hidden_states
__A = getattr(
self.model_tester, '''expected_num_hidden_layers''', len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCamelCase ), _lowerCamelCase )
# FocalNet has a different seq_length
__A = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
__A = outputs.reshaped_hidden_states
self.assertEqual(len(_lowerCamelCase ), _lowerCamelCase )
__A , __A , __A , __A = reshaped_hidden_states[0].shape
__A = (
reshaped_hidden_states[0].view(_lowerCamelCase, _lowerCamelCase, height * width ).permute(0, 2, 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__A = True
self.check_hidden_states_output(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A = True
self.check_hidden_states_output(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = 3
__A = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__A = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__A = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__A = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__A = True
self.check_hidden_states_output(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A = True
self.check_hidden_states_output(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, (padded_height, padded_width) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = FocalNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = _config_zero_init(_lowerCamelCase )
for model_class in self.all_model_classes:
__A = model_class(config=_lowerCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
# TODO update organization
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(_lowerCamelCase )
__A = self.default_image_processor
__A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__A = image_processor(images=_lowerCamelCase, return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
__A = model(**_lowerCamelCase )
# verify the logits
__A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape, _lowerCamelCase )
__A = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], _lowerCamelCase, atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item(), 2_81 )
@require_torch
class snake_case ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[Any] = (FocalNetBackbone,) if is_torch_available() else ()
A_ : str = FocalNetConfig
A_ : str = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = FocalNetModelTester(self )
| 215 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__magic_name__ : int = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__magic_name__ : Tuple = TaTokenizerFast
__magic_name__ : Optional[Any] = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : str = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : List[str] = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : List[str] = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__magic_name__ : int = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
| 280 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __UpperCamelCase ( _a ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=0 ):
UpperCAmelCase__: Any = 1.0 if scale is None else scale
UpperCAmelCase__: int = 0.0 if loc is None else loc
super().__init__(lowerCamelCase__ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase__ )] )
@property
def _UpperCAmelCase ( self ):
return self.base_dist.mean * self.scale + self.loc
@property
def _UpperCAmelCase ( self ):
return self.base_dist.variance * self.scale**2
@property
def _UpperCAmelCase ( self ):
return self.variance.sqrt()
class __UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
super().__init__(**lowerCamelCase__ )
UpperCAmelCase__: Any = args_dim
UpperCAmelCase__: List[str] = nn.ModuleList([nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) for dim in args_dim.values()] )
UpperCAmelCase__: Any = domain_map
def _UpperCAmelCase ( self , lowerCamelCase__ ):
UpperCAmelCase__: str = [proj(lowerCamelCase__ ) for proj in self.proj]
return self.domain_map(*lowerCamelCase__ )
class __UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
super().__init__()
UpperCAmelCase__: str = function
def _UpperCAmelCase ( self , lowerCamelCase__ , *lowerCamelCase__ ):
return self.function(lowerCamelCase__ , *lowerCamelCase__ )
class __UpperCamelCase :
'''simple docstring'''
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
def __init__( self , lowerCamelCase__ = 1 ):
UpperCAmelCase__: str = dim
UpperCAmelCase__: Any = {k: dim * self.args_dim[k] for k in self.args_dim}
def _UpperCAmelCase ( self , lowerCamelCase__ ):
if self.dim == 1:
return self.distribution_class(*lowerCamelCase__ )
else:
return Independent(self.distribution_class(*lowerCamelCase__ ) , 1 )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , ):
UpperCAmelCase__: Optional[Any] = self._base_distribution(lowerCamelCase__ )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase__ , loc=lowerCamelCase__ , scale=lowerCamelCase__ , event_dim=self.event_dim )
@property
def _UpperCAmelCase ( self ):
return () if self.dim == 1 else (self.dim,)
@property
def _UpperCAmelCase ( self ):
return len(self.event_shape )
@property
def _UpperCAmelCase ( self ):
return 0.0
def _UpperCAmelCase ( self , lowerCamelCase__ ):
return ParameterProjection(
in_features=lowerCamelCase__ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def _UpperCAmelCase ( self , *lowerCamelCase__ ):
raise NotImplementedError()
@staticmethod
def _UpperCAmelCase ( lowerCamelCase__ ):
return (x + torch.sqrt(torch.square(lowerCamelCase__ ) + 4.0 )) / 2.0
class __UpperCamelCase ( _a ):
'''simple docstring'''
__magic_name__ = {"df": 1, "loc": 1, "scale": 1}
__magic_name__ = StudentT
@classmethod
def _UpperCAmelCase ( cls , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase__: Dict = cls.squareplus(lowerCamelCase__ ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCAmelCase__: List[Any] = 2.0 + cls.squareplus(lowerCamelCase__ )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __UpperCamelCase ( _a ):
'''simple docstring'''
__magic_name__ = {"loc": 1, "scale": 1}
__magic_name__ = Normal
@classmethod
def _UpperCAmelCase ( cls , lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase__: str = cls.squareplus(lowerCamelCase__ ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __UpperCamelCase ( _a ):
'''simple docstring'''
__magic_name__ = {"total_count": 1, "logits": 1}
__magic_name__ = NegativeBinomial
@classmethod
def _UpperCAmelCase ( cls , lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase__: Dict = cls.squareplus(lowerCamelCase__ )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def _UpperCAmelCase ( self , lowerCamelCase__ ):
UpperCAmelCase__ , UpperCAmelCase__: str = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase__ , logits=lowerCamelCase__ )
else:
return Independent(self.distribution_class(total_count=lowerCamelCase__ , logits=lowerCamelCase__ ) , 1 )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None ):
UpperCAmelCase__ , UpperCAmelCase__: List[str] = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) ) | 113 | 0 |
'''simple docstring'''
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if collection == []:
return []
# get some information about the collection
SCREAMING_SNAKE_CASE : List[Any] = len(a__ )
SCREAMING_SNAKE_CASE : int = max(a__ )
SCREAMING_SNAKE_CASE : Optional[int] = min(a__ )
# create the counting array
SCREAMING_SNAKE_CASE : str = coll_max + 1 - coll_min
SCREAMING_SNAKE_CASE : Any = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with it's predecessors. now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1 , a__ ):
SCREAMING_SNAKE_CASE : Tuple = counting_arr[i] + counting_arr[i - 1]
# create the output collection
SCREAMING_SNAKE_CASE : int = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , a__ ) ):
SCREAMING_SNAKE_CASE : Dict = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return "".join([chr(a__ ) for i in counting_sort([ord(a__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
a__ : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip()
a__ : str = [int(item) for item in user_input.split(''',''')]
print(counting_sort(unsorted))
| 715 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = AutoencoderKL
__SCREAMING_SNAKE_CASE : Optional[int] = 'sample'
__SCREAMING_SNAKE_CASE : Any = 1E-2
@property
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : List[Any] = 4
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : int = (32, 32)
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCamelCase )
return {"sample": image}
@property
def __lowerCAmelCase ( self ) ->str:
return (3, 32, 32)
@property
def __lowerCAmelCase ( self ) ->Dict:
return (3, 32, 32)
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def __lowerCAmelCase ( self ) ->Dict:
pass
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def __lowerCAmelCase ( self ) ->Dict:
# enable deterministic behavior for gradient checkpointing
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_class(**_lowerCamelCase )
model.to(_lowerCamelCase )
assert not model.is_gradient_checkpointing and model.training
SCREAMING_SNAKE_CASE : Tuple = model(**_lowerCamelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn_like(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
SCREAMING_SNAKE_CASE : str = self.model_class(**_lowerCamelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_lowerCamelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
SCREAMING_SNAKE_CASE : Any = model_a(**_lowerCamelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
SCREAMING_SNAKE_CASE : Tuple = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
SCREAMING_SNAKE_CASE : List[Any] = dict(model.named_parameters() )
SCREAMING_SNAKE_CASE : Optional[int] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
SCREAMING_SNAKE_CASE : Dict = model.to(_lowerCamelCase )
model.eval()
if torch_device == "mps":
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
SCREAMING_SNAKE_CASE : List[Any] = image.to(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(_lowerCamelCase , sample_posterior=_lowerCamelCase , generator=_lowerCamelCase ).sample
SCREAMING_SNAKE_CASE : Union[str, Any] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(_lowerCamelCase , _lowerCamelCase , rtol=1e-2 ) )
@slow
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Union[str, Any]:
return F"""gaussian_noise_s={seed}_shape={"_".join([str(_lowerCamelCase ) for s in shape] )}.npy"""
def __lowerCAmelCase ( self ) ->Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self , _lowerCamelCase=0 , _lowerCamelCase=(4, 3, 512, 512) , _lowerCamelCase=False ) ->List[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = torch.floataa if fpaa else torch.floataa
SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(load_hf_numpy(self.get_file_format(_lowerCamelCase , _lowerCamelCase ) ) ).to(_lowerCamelCase ).to(_lowerCamelCase )
return image
def __lowerCAmelCase ( self , _lowerCamelCase="CompVis/stable-diffusion-v1-4" , _lowerCamelCase=False ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[str] = '''fp16''' if fpaa else None
SCREAMING_SNAKE_CASE : Dict = torch.floataa if fpaa else torch.floataa
SCREAMING_SNAKE_CASE : List[str] = AutoencoderKL.from_pretrained(
_lowerCamelCase , subfolder='''vae''' , torch_dtype=_lowerCamelCase , revision=_lowerCamelCase , )
model.to(_lowerCamelCase ).eval()
return model
def __lowerCAmelCase ( self , _lowerCamelCase=0 ) ->Optional[int]:
if torch_device == "mps":
return torch.manual_seed(_lowerCamelCase )
return torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Optional[int]:
SCREAMING_SNAKE_CASE : List[Any] = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = self.get_generator(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , generator=_lowerCamelCase , sample_posterior=_lowerCamelCase ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE : Any = sample[-1, -2:, -2:, :2].flatten().float().cpu()
SCREAMING_SNAKE_CASE : Any = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_vae_model(fpaa=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_sd_image(_lowerCamelCase , fpaa=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = self.get_generator(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(_lowerCamelCase , generator=_lowerCamelCase , sample_posterior=_lowerCamelCase ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE : Optional[int] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
SCREAMING_SNAKE_CASE : str = torch.tensor(_lowerCamelCase )
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Dict = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(_lowerCamelCase ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE : Dict = sample[-1, -2:, -2:, :2].flatten().float().cpu()
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : str = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model.decode(_lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
SCREAMING_SNAKE_CASE : Any = sample[-1, -2:, :2, -2:].flatten().cpu()
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(_lowerCamelCase )
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : int = self.get_sd_vae_model(fpaa=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model.decode(_lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
SCREAMING_SNAKE_CASE : str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(_lowerCamelCase )
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_vae_model(fpaa=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model.decode(_lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model.decode(_lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : int = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model.decode(_lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model.decode(_lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : int = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = self.get_generator(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model.encode(_lowerCamelCase ).latent_dist
SCREAMING_SNAKE_CASE : int = dist.sample(generator=_lowerCamelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
SCREAMING_SNAKE_CASE : Optional[Any] = sample[0, -1, -3:, -3:].flatten().cpu()
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = 3e-3 if torch_device != '''mps''' else 1e-2
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=_lowerCamelCase )
| 333 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : Dict = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""}
__UpperCamelCase : str = {
"""vocab_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""",
},
"""emoji_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""",
},
}
__UpperCamelCase : Dict = {
"""abeja/gpt-neox-japanese-2.7b""": 2048,
}
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
with open(lowerCamelCase , """r""" , encoding="""utf-8""" ) as f:
__lowercase = json.loads(f.read() )
__lowercase = collections.OrderedDict()
__lowercase = collections.OrderedDict()
__lowercase = collections.OrderedDict()
with open(lowerCamelCase , """r""" , encoding="""utf-8""" ) as f:
__lowercase = f.readlines()
__lowercase = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token]
for idx, b in enumerate(lowerCamelCase ):
__lowercase = b
__lowercase = idx
for wd in b:
__lowercase = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :int = VOCAB_FILES_NAMES
__snake_case :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case :Any = ['input_ids', 'attention_mask']
def __init__( self : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any="<|endoftext|>" , _lowerCAmelCase : int="<|endoftext|>" , _lowerCAmelCase : Optional[int]="<|startoftext|>" , _lowerCAmelCase : int="<|endoftext|>" , _lowerCAmelCase : Dict=False , **_lowerCAmelCase : Any , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , do_clean_text=_lowerCAmelCase , **_lowerCAmelCase , )
if not os.path.isfile(_lowerCAmelCase ):
raise ValueError(
F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
""" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
if not os.path.isfile(_lowerCAmelCase ):
raise ValueError(
F'Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
""" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
__lowercase = do_clean_text
__lowercase , __lowercase , __lowercase , __lowercase = load_vocab_and_emoji(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def _a ( self : List[str] ) -> str:
"""simple docstring"""
return len(self.raw_vocab )
def _a ( self : Tuple ) -> str:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def _a ( self : Any , _lowerCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(_lowerCAmelCase , clean=self.do_clean_text )
def _a ( self : Any , _lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.vocab.get(_lowerCAmelCase , self.vocab.get(self.unk_token ) )
def _a ( self : List[Any] , _lowerCAmelCase : Tuple ) -> int:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(_lowerCAmelCase )
def _a ( self : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = """""".join(_lowerCAmelCase ).strip()
return out_string
def _a ( self : str , _lowerCAmelCase : "Conversation" ) -> List[int]:
"""simple docstring"""
__lowercase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) + [self.eos_token_id] )
if len(_lowerCAmelCase ) > self.model_max_length:
__lowercase = input_ids[-self.model_max_length :]
return input_ids
def _a ( self : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
__lowercase = 0
if os.path.isdir(_lowerCAmelCase ):
__lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
else:
__lowercase = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""]
)
__lowercase = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""]
)
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
""" Please check that the vocabulary is not corrupted!""" )
__lowercase = token_index
writer.write(""",""".join(_lowerCAmelCase ) + """\n""" )
index += 1
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
json.dump(self.emoji , _lowerCAmelCase )
return vocab_file, emoji_file
class __UpperCamelCase ( _lowerCAmelCase ):
def __init__( self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = vocab # same as swe
__lowercase = ids_to_tokens # same as bpe
__lowercase = emoji
__lowercase = np.max([len(_lowerCAmelCase ) for w in self.vocab.keys()] )
__lowercase = re.compile(r"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
__lowercase = re.compile(r"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
__lowercase = re.compile(r"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
__lowercase = re.compile(
r"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
__lowercase = re.compile(
r"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
__lowercase = re.compile(
r"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
__lowercase = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
__lowercase = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
__lowercase = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
def __len__( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return len(self.ids_to_tokens )
def _a ( self : Dict , _lowerCAmelCase : str ) -> Any:
"""simple docstring"""
__lowercase = self.content_repattera.sub("""<URL>""" , _lowerCAmelCase )
__lowercase = self.content_repattera.sub("""<EMAIL>""" , _lowerCAmelCase )
__lowercase = self.content_repattera.sub("""<TEL>""" , _lowerCAmelCase )
__lowercase = self.content_repattera.sub("""<DATE>""" , _lowerCAmelCase )
__lowercase = self.content_repattera.sub("""<DATE>""" , _lowerCAmelCase )
__lowercase = self.content_repattera.sub("""<PRICE>""" , _lowerCAmelCase )
__lowercase = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
__lowercase = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
return content
def _a ( self : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str]=False ) -> int:
"""simple docstring"""
__lowercase = text.replace(""" """ , """<SP>""" )
__lowercase = text.replace(""" """ , """<SP>""" )
__lowercase = text.replace("""\r\n""" , """<BR>""" )
__lowercase = text.replace("""\n""" , """<BR>""" )
__lowercase = text.replace("""\r""" , """<BR>""" )
__lowercase = text.replace("""\t""" , """<TAB>""" )
__lowercase = text.replace("""—""" , """ー""" )
__lowercase = text.replace("""−""" , """ー""" )
for k, v in self.emoji["emoji"].items():
if k in text:
__lowercase = text.replace(_lowerCAmelCase , _lowerCAmelCase )
if clean:
__lowercase = self.clean_text(_lowerCAmelCase )
def check_simbol(_lowerCAmelCase : List[Any] ):
__lowercase = x.encode()
if len(_lowerCAmelCase ) == 1 and len(_lowerCAmelCase ) == 2:
__lowercase = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0Xc2a1 and c <= 0Xc2bf)
or (c >= 0Xc780 and c <= 0Xc783)
or (c >= 0Xcab9 and c <= 0Xcbbf)
or (c >= 0Xcc80 and c <= 0Xcda2)
):
return True
return False
def checkuae(_lowerCAmelCase : int ):
__lowercase = x.encode()
if len(_lowerCAmelCase ) == 1 and len(_lowerCAmelCase ) == 3:
__lowercase = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0Xe2_8080 and c <= 0Xe2_b07f:
return True
return False
__lowercase = 0
__lowercase = []
while pos < len(_lowerCAmelCase ):
__lowercase = min(len(_lowerCAmelCase ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
__lowercase = [] # (token_id, token, pos)
for e in range(_lowerCAmelCase , _lowerCAmelCase , -1 ):
__lowercase = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_lowerCAmelCase ) > 2:
__lowercase = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_lowerCAmelCase ) > 0:
# the smallest token_id is adopted
__lowercase , __lowercase , __lowercase = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[0] )[0]
result.append(_lowerCAmelCase )
__lowercase = e
else:
__lowercase = pos + 1
__lowercase = text[pos:end]
if check_simbol(_lowerCAmelCase ):
result.append("""<KIGOU>""" )
elif checkuae(_lowerCAmelCase ):
result.append("""<U2000U2BFF>""" )
else:
for i in wd.encode("""utf-8""" ):
result.append("""<|byte%d|>""" % i )
__lowercase = end
return result
def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int]="\n" ) -> List[Any]:
"""simple docstring"""
__lowercase = []
__lowercase = []
__lowercase = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_lowerCAmelCase ) > 0:
words.append(bytearray(_lowerCAmelCase ).decode("""utf-8""" , errors="""replace""" ) )
__lowercase = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["""emoji_inv"""][word] )
elif word == "<SP>":
words.append(""" """ )
elif word == "<BR>":
words.append(_lowerCAmelCase )
elif word == "<TAB>":
words.append("""\t""" )
elif word == "<BLOCK>":
words.append("""▀""" )
elif word == "<KIGOU>":
words.append("""ǀ""" )
elif word == "<U2000U2BFF>":
words.append("""‖""" )
else:
words.append(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
words.append(bytearray(_lowerCAmelCase ).decode("""utf-8""" , errors="""replace""" ) )
__lowercase = """""".join(_lowerCAmelCase )
return text
| 80 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase : str , _lowerCamelCase : Optional[int] ):
lowerCamelCase_ = nn.functional.normalize(_lowerCamelCase )
lowerCamelCase_ = nn.functional.normalize(_lowerCamelCase )
return torch.mm(_lowerCamelCase , normalized_text_embeds.t() )
class lowerCAmelCase ( a ):
"""simple docstring"""
__lowercase :Union[str, Any] = CLIPConfig
__lowercase :int = ["CLIPEncoderLayer"]
def __init__( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
super().__init__(UpperCamelCase__ )
lowerCamelCase_ = CLIPVisionModel(config.vision_config )
lowerCamelCase_ = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=UpperCamelCase__ )
lowerCamelCase_ = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=UpperCamelCase__ )
lowerCamelCase_ = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=UpperCamelCase__ )
lowerCamelCase_ = nn.Parameter(torch.ones(17 ) , requires_grad=UpperCamelCase__ )
lowerCamelCase_ = nn.Parameter(torch.ones(3 ) , requires_grad=UpperCamelCase__ )
@torch.no_grad()
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.vision_model(UpperCamelCase__ )[1] # pooled_output
lowerCamelCase_ = self.visual_projection(UpperCamelCase__ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase_ = cosine_distance(UpperCamelCase__ , self.special_care_embeds ).cpu().float().numpy()
lowerCamelCase_ = cosine_distance(UpperCamelCase__ , self.concept_embeds ).cpu().float().numpy()
lowerCamelCase_ = []
lowerCamelCase_ = image_embeds.shape[0]
for i in range(UpperCamelCase__ ):
lowerCamelCase_ = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCamelCase_ = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
lowerCamelCase_ = special_cos_dist[i][concept_idx]
lowerCamelCase_ = self.special_care_embeds_weights[concept_idx].item()
lowerCamelCase_ = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
lowerCamelCase_ = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
lowerCamelCase_ = cos_dist[i][concept_idx]
lowerCamelCase_ = self.concept_embeds_weights[concept_idx].item()
lowerCamelCase_ = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(UpperCamelCase__ )
result.append(UpperCamelCase__ )
lowerCamelCase_ = [len(res['''bad_concepts'''] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.vision_model(UpperCamelCase__ )[1] # pooled_output
lowerCamelCase_ = self.visual_projection(UpperCamelCase__ )
lowerCamelCase_ = cosine_distance(UpperCamelCase__ , self.special_care_embeds )
lowerCamelCase_ = cosine_distance(UpperCamelCase__ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCamelCase_ = 0.0
lowerCamelCase_ = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
lowerCamelCase_ = torch.any(special_scores > 0 , dim=1 )
lowerCamelCase_ = special_care * 0.01
lowerCamelCase_ = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
lowerCamelCase_ = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
lowerCamelCase_ = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts | 142 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = "▁"
__lowerCamelCase : List[str] = {"vocab_file": "sentencepiece.bpe.model"}
__lowerCamelCase : int = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
__lowerCamelCase : int = {
"facebook/xglm-564M": 2048,
}
class UpperCAmelCase ( lowercase__ ):
UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__(self : Dict , A__ : Optional[int] , A__ : List[Any]="<s>" , A__ : int="</s>" , A__ : Optional[Any]="</s>" , A__ : Optional[Any]="<s>" , A__ : Any="<unk>" , A__ : Any="<pad>" , A__ : Optional[Dict[str, Any]] = None , **A__ : Optional[int] , ) -> int:
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase = 7
lowercase = [f'<madeupword{i}>' for i in range(self.num_madeup_words )]
lowercase = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase__ ) )
lowercase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase = len(self.sp_model )
lowercase = {f'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(UpperCAmelCase__ )
lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self : int ) -> int:
lowercase = self.__dict__.copy()
lowercase = None
lowercase = self.sp_model.serialized_model_proto()
return state
def __setstate__(self : Optional[Any] , A__ : Any ) -> List[Any]:
lowercase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase__ (self : str , A__ : List[int] , A__ : Optional[List[int]] = None ) -> str:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCAmelCase__ (self : Any , A__ : List[int] , A__ : Optional[List[int]] = None , A__ : bool = False ) -> Dict:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase__ ))
return [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] + ([0] * len(UpperCAmelCase__ ))
def UpperCAmelCase__ (self : Dict , A__ : List[int] , A__ : Optional[List[int]] = None ) -> Optional[Any]:
lowercase = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCAmelCase__ (self : Dict ) -> Union[str, Any]:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCAmelCase__ (self : List[str] ) -> List[Any]:
lowercase = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__ (self : Tuple , A__ : str ) -> Dict:
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def UpperCAmelCase__ (self : Dict , A__ : Optional[Any] ) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase = self.sp_model.PieceToId(UpperCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase__ (self : Optional[Any] , A__ : Any ) -> Optional[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase__ (self : List[str] , A__ : Optional[Any] ) -> Optional[Any]:
lowercase = ''''''.join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , " " ).strip()
return out_string
def UpperCAmelCase__ (self : Optional[int] , A__ : str , A__ : Optional[str] = None ) -> Tuple:
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase = os.path.join(
UpperCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , "wb" ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
| 700 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase ( _lowercase ):
def __init__(self : Tuple , *A__ : Union[str, Any] , **A__ : Optional[Any] ) -> None:
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead." , A__ , )
super().__init__(*A__ , **A__ )
| 459 | 0 |
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
while second != 0:
_A = first & second
first ^= second
_A = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = int(input('Enter the first number: ').strip())
__A = int(input('Enter the second number: ').strip())
print(f'{add(first, second) = }')
| 484 |
"""simple docstring"""
from __future__ import annotations
def _A ( __lowercase ):
"""simple docstring"""
if len(__lowercase ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
lowerCamelCase__ = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 129 | 0 |
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
_validate_point(snake_case__ )
_validate_point(snake_case__ )
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(snake_case__ , snake_case__ ) ) )
def __lowerCAmelCase ( snake_case__ ):
if point:
if isinstance(snake_case__ , snake_case__ ):
for item in point:
if not isinstance(snake_case__ , (int, float) ):
__UpperCamelCase : Optional[Any] = (
"Expected a list of numbers as input, found "
F"{type(snake_case__ ).__name__}"
)
raise TypeError(snake_case__ )
else:
__UpperCamelCase : Optional[int] = F"Expected a list of numbers as input, found {type(snake_case__ ).__name__}"
raise TypeError(snake_case__ )
else:
raise ValueError("Missing an input" )
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
_validate_point(snake_case__ )
_validate_point(snake_case__ )
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(snake_case__ , snake_case__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = None
A = BloomTokenizerFast
A = BloomTokenizerFast
A = True
A = False
A = "tokenizer_file"
A = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def a_ (self ) -> Dict:
super().setUp()
__UpperCamelCase : Optional[int] = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
tokenizer.save_pretrained(self.tmpdirname )
def a_ (self , **_UpperCAmelCase ) -> str:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def a_ (self ) -> List[str]:
__UpperCamelCase : Union[str, Any] = self.get_rust_tokenizer()
__UpperCamelCase : Optional[Any] = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
__UpperCamelCase : Optional[Any] = [[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]]
__UpperCamelCase : Dict = tokenizer.batch_encode_plus(_UpperCAmelCase )["input_ids"]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : List[str] = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def a_ (self , _UpperCAmelCase=6 ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__UpperCamelCase : Any = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__UpperCamelCase : List[Any] = "This is a simple input"
__UpperCamelCase : int = ["This is a simple input 1", "This is a simple input 2"]
__UpperCamelCase : Any = ("This is a simple input", "This is a pair")
__UpperCamelCase : List[Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(_UpperCAmelCase , max_length=_UpperCAmelCase )
tokenizer_r.encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
tokenizer_r.batch_encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
tokenizer_r.encode(_UpperCAmelCase , max_length=_UpperCAmelCase )
tokenizer_r.batch_encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
__UpperCamelCase : List[Any] = None # Hotfixing padding = None
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" , )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" , )
def a_ (self ) -> Tuple:
__UpperCamelCase : List[str] = self.get_rust_tokenizer()
__UpperCamelCase : Any = load_dataset("xnli" , "all_languages" , split="test" , streaming=_UpperCAmelCase )
__UpperCamelCase : List[Any] = next(iter(_UpperCAmelCase ) )["premise"] # pick up one data
__UpperCamelCase : Dict = list(sample_data.values() )
__UpperCamelCase : Optional[Any] = list(map(tokenizer.encode , _UpperCAmelCase ) )
__UpperCamelCase : List[Any] = [tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) for x in output_tokens]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def a_ (self ) -> Optional[Any]:
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positoonal embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 399 | 0 |
"""simple docstring"""
from typing import Any
import numpy as np
def lowercase__( __SCREAMING_SNAKE_CASE : np.ndarray ):
return np.array_equal(__SCREAMING_SNAKE_CASE , matrix.conjugate().T )
def lowercase__( __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : np.ndarray ):
lowercase_ : Optional[int] = v.conjugate().T
lowercase_ : str = v_star.dot(__SCREAMING_SNAKE_CASE )
assert isinstance(__SCREAMING_SNAKE_CASE , np.ndarray )
return (v_star_dot.dot(__SCREAMING_SNAKE_CASE )) / (v_star.dot(__SCREAMING_SNAKE_CASE ))
def lowercase__( ):
lowercase_ : Optional[int] = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
lowercase_ : Tuple = np.array([[1], [2], [3]] )
assert is_hermitian(__SCREAMING_SNAKE_CASE ), F'''{a} is not hermitian.'''
print(rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
lowercase_ : List[str] = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(__SCREAMING_SNAKE_CASE ), F'''{a} is not hermitian.'''
assert rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 425 | """simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ):
stooge(__SCREAMING_SNAKE_CASE , 0 , len(__SCREAMING_SNAKE_CASE ) - 1 )
return arr
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int ):
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
lowercase_ , lowercase_ : List[Any] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
lowercase_ : str = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (h - t) )
# Recursively sort last 2/3 elements
stooge(__SCREAMING_SNAKE_CASE , i + t , (__SCREAMING_SNAKE_CASE) )
# Recursively sort first 2/3 elements
stooge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (h - t) )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =input("Enter numbers separated by a comma:\n").strip()
__SCREAMING_SNAKE_CASE =[int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 425 | 1 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
__lowerCamelCase : Optional[int] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( snake_case_ : List[str] ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : str , snake_case_ : List[Any] ):
return max(metric_fn(snake_case_ , snake_case_ ) for gt in ground_truths )
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Any ):
snake_case__ : List[str] = [line.strip() for line in open(snake_case_ , "r" ).readlines()]
snake_case__ : Optional[Any] = []
if args.gold_data_mode == "qa":
snake_case__ : Optional[int] = pd.read_csv(snake_case_ , sep="\t" , header=snake_case_ )
for answer_list in data[1]:
snake_case__ : Union[str, Any] = ast.literal_eval(snake_case_ )
answers.append(snake_case_ )
else:
snake_case__ : Optional[Any] = [line.strip() for line in open(snake_case_ , "r" ).readlines()]
snake_case__ : Optional[int] = [[reference] for reference in references]
snake_case__ : Dict = 0
for prediction, ground_truths in zip(snake_case_ , snake_case_ ):
total += 1
em += metric_max_over_ground_truths(snake_case_ , snake_case_ , snake_case_ )
fa += metric_max_over_ground_truths(snake_case_ , snake_case_ , snake_case_ )
snake_case__ : Any = 1_00.0 * em / total
snake_case__ : Optional[Any] = 1_00.0 * fa / total
logger.info(F'''F1: {fa:.2f}''' )
logger.info(F'''EM: {em:.2f}''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : int ):
snake_case__ : Any = args.k
snake_case__ : Tuple = [line.strip() for line in open(snake_case_ , "r" ).readlines()]
snake_case__ : int = [line.strip() for line in open(snake_case_ , "r" ).readlines()]
snake_case__ : Dict = 0
for hypo, reference in zip(snake_case_ , snake_case_ ):
snake_case__ : List[str] = set(hypo.split("\t" )[:k] )
snake_case__ : str = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
snake_case__ : Any = 1_00.0 * em / total
logger.info(F'''Precision@{k}: {em: .2f}''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[int] ):
def strip_title(snake_case_ : Union[str, Any] ):
if title.startswith("\"" ):
snake_case__ : List[Any] = title[1:]
if title.endswith("\"" ):
snake_case__ : Union[str, Any] = title[:-1]
return title
snake_case__ : List[str] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
snake_case_ , return_tensors="pt" , padding=snake_case_ , truncation=snake_case_ , )["input_ids"].to(args.device )
snake_case__ : int = rag_model.rag.question_encoder(snake_case_ )
snake_case__ : Optional[Any] = question_enc_outputs[0]
snake_case__ : Tuple = rag_model.retriever(
snake_case_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
snake_case__ : Tuple = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
snake_case__ : Dict = []
for docs in all_docs:
snake_case__ : Optional[Any] = [strip_title(snake_case_ ) for title in docs["title"]]
provenance_strings.append("\t".join(snake_case_ ) )
return provenance_strings
def SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Optional[Any] ):
with torch.no_grad():
snake_case__ : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
snake_case_ , return_tensors="pt" , padding=snake_case_ , truncation=snake_case_ )
snake_case__ : str = inputs_dict.input_ids.to(args.device )
snake_case__ : Any = inputs_dict.attention_mask.to(args.device )
snake_case__ : Any = rag_model.generate( # rag_model overwrites generate
snake_case_ , attention_mask=snake_case_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=snake_case_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
snake_case__ : List[str] = rag_model.retriever.generator_tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ )
if args.print_predictions:
for q, a in zip(snake_case_ , snake_case_ ):
logger.info("Q: {} - A: {}".format(snake_case_ , snake_case_ ) )
return answers
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=snake_case_ , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=snake_case_ , choices=["exact", "compressed", "legacy"] , type=snake_case_ , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=snake_case_ , type=snake_case_ , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=snake_case_ , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=snake_case_ , type=snake_case_ , required=snake_case_ , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=snake_case_ , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=snake_case_ , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=snake_case_ , type=snake_case_ , required=snake_case_ , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=snake_case_ , type=snake_case_ , required=snake_case_ , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=snake_case_ , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=snake_case_ , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=snake_case_ , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=snake_case_ , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=snake_case_ , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=snake_case_ , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
snake_case__ : Union[str, Any] = parser.parse_args()
snake_case__ : Tuple = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
snake_case__ : List[Any] = {}
if args.model_type is None:
snake_case__ : str = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
snake_case__ : Any = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
snake_case__ : Tuple = args.n_docs
if args.index_name is not None:
snake_case__ : Tuple = args.index_name
if args.index_path is not None:
snake_case__ : Optional[Any] = args.index_path
else:
snake_case__ : List[Any] = BartForConditionalGeneration
snake_case__ : List[Any] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , snake_case_ )
snake_case__ : Any = get_scores if args.eval_mode == "e2e" else get_precision_at_k
snake_case__ : List[Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(snake_case_ , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(snake_case_ ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
snake_case__ : Dict = RagRetriever.from_pretrained(snake_case_ , **snake_case_ )
snake_case__ : int = model_class.from_pretrained(snake_case_ , retriever=snake_case_ , **snake_case_ )
model.retriever.init_retrieval()
else:
snake_case__ : Optional[int] = model_class.from_pretrained(snake_case_ , **snake_case_ )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
snake_case__ : Union[str, Any] = []
for line in tqdm(snake_case_ ):
questions.append(line.strip() )
if len(snake_case_ ) == args.eval_batch_size:
snake_case__ : Dict = evaluate_batch_fn(snake_case_ , snake_case_ , snake_case_ )
preds_file.write("\n".join(snake_case_ ) + "\n" )
preds_file.flush()
snake_case__ : Optional[int] = []
if len(snake_case_ ) > 0:
snake_case__ : Union[str, Any] = evaluate_batch_fn(snake_case_ , snake_case_ , snake_case_ )
preds_file.write("\n".join(snake_case_ ) )
preds_file.flush()
score_fn(snake_case_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
__lowerCamelCase : Dict = get_args()
main(args)
| 708 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase : Tuple = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Union[str, Any] ):
for attribute in key.split("." ):
snake_case__ : int = getattr(snake_case_ , snake_case_ )
if weight_type is not None:
snake_case__ : Optional[Any] = getattr(snake_case_ , snake_case_ ).shape
else:
snake_case__ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case__ : str = value
elif weight_type == "weight_g":
snake_case__ : Union[str, Any] = value
elif weight_type == "weight_v":
snake_case__ : Optional[Any] = value
elif weight_type == "bias":
snake_case__ : str = value
else:
snake_case__ : Union[str, Any] = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] ):
snake_case__ : str = []
snake_case__ : Optional[int] = fairseq_model.state_dict()
snake_case__ : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
snake_case__ : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case__ : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case__ : int = True
if "*" in mapped_key:
snake_case__ : Any = name.split(snake_case_ )[0].split("." )[-2]
snake_case__ : Any = mapped_key.replace("*" , snake_case_ )
if "weight_g" in name:
snake_case__ : List[Any] = "weight_g"
elif "weight_v" in name:
snake_case__ : Optional[Any] = "weight_v"
elif "bias" in name:
snake_case__ : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ : Optional[Any] = "weight"
else:
snake_case__ : Optional[Any] = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : str ):
snake_case__ : Tuple = full_name.split("conv_layers." )[-1]
snake_case__ : Union[str, Any] = name.split("." )
snake_case__ : str = int(items[0] )
snake_case__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
snake_case__ : Optional[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
snake_case__ : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=True ):
if config_path is not None:
snake_case__ : Tuple = UniSpeechSatConfig.from_pretrained(snake_case_ )
else:
snake_case__ : Tuple = UniSpeechSatConfig()
snake_case__ : str = ""
if is_finetuned:
snake_case__ : Tuple = UniSpeechSatForCTC(snake_case_ )
else:
snake_case__ : Any = UniSpeechSatForPreTraining(snake_case_ )
snake_case__, snake_case__, snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case__ : Tuple = model[0].eval()
recursively_load_weights(snake_case_ , snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase : List[Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 25 | 0 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE :str = logging.get_logger("""transformers.models.speecht5""")
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[int]:
"""simple docstring"""
hf_model.apply_weight_norm()
UpperCamelCase_ = checkpoint["input_conv.weight_g"]
UpperCamelCase_ = checkpoint["input_conv.weight_v"]
UpperCamelCase_ = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
UpperCamelCase_ = checkpoint[f"upsamples.{i}.1.weight_g"]
UpperCamelCase_ = checkpoint[f"upsamples.{i}.1.weight_v"]
UpperCamelCase_ = checkpoint[f"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCamelCase_ = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
UpperCamelCase_ = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
UpperCamelCase_ = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
UpperCamelCase_ = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
UpperCamelCase_ = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
UpperCamelCase_ = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
UpperCamelCase_ = checkpoint["output_conv.1.weight_g"]
UpperCamelCase_ = checkpoint["output_conv.1.weight_v"]
UpperCamelCase_ = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , )-> Any:
"""simple docstring"""
if config_path is not None:
UpperCamelCase_ = SpeechTaHifiGanConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase_ = SpeechTaHifiGanConfig()
UpperCamelCase_ = SpeechTaHifiGan(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = torch.load(SCREAMING_SNAKE_CASE_ )
load_weights(orig_checkpoint["model"]["generator"] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = np.load(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = stats[0].reshape(-1 )
UpperCamelCase_ = stats[1].reshape(-1 )
UpperCamelCase_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).float()
UpperCamelCase_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).float()
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
SCREAMING_SNAKE_CASE :Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 628 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( snake_case , unittest.TestCase ):
UpperCamelCase_ :List[Any] = LayoutLMTokenizer
UpperCamelCase_ :Dict = LayoutLMTokenizerFast
UpperCamelCase_ :List[str] = True
UpperCamelCase_ :Dict = True
def UpperCAmelCase_ ( self )-> str:
super().setUp()
UpperCamelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCAmelCase_ ( self , **_lowercase )-> List[str]:
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase_ ( self , _lowercase )-> Union[str, Any]:
UpperCamelCase_ = "UNwant\u00E9d,running"
UpperCamelCase_ = "unwanted, running"
return input_text, output_text
def UpperCAmelCase_ ( self )-> Tuple:
UpperCamelCase_ = self.tokenizer_class(self.vocab_file )
UpperCamelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(_lowercase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [7, 4, 5, 10, 8, 9] )
def UpperCAmelCase_ ( self )-> Dict:
pass
| 628 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 700 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionDiffEditPipeline
__SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Any = frozenset([] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , )
lowercase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
lowercase_ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_zero=UpperCamelCase__ , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase_ = CLIPTextModel(UpperCamelCase__ )
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Any=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
lowercase_ = self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe_loaded(**UpperCamelCase__ )[0]
lowercase_ = np.abs(output - output_loaded ).max()
self.assertLess(UpperCamelCase__ , 1e-4 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_mask_inputs(UpperCamelCase__ )
lowercase_ = pipe.generate_mask(**UpperCamelCase__ )
lowercase_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase_ = np.array([0] * 9 )
lowercase_ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
lowercase_ = DPMSolverMultistepScheduler(**UpperCamelCase__ )
lowercase_ = DPMSolverMultistepInverseScheduler(**UpperCamelCase__ )
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
lowercase_ = raw_image.convert("""RGB""" ).resize((768, 768) )
lowercase_ = raw_image
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
lowercase_ = DDIMScheduler.from_config(pipe.scheduler.config )
lowercase_ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
lowercase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ , num_inference_steps=25 , ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 650 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'glpn'
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=[2, 2, 2, 2] , __UpperCAmelCase=[8, 4, 2, 1] , __UpperCAmelCase=[32, 64, 160, 256] , __UpperCAmelCase=[7, 3, 3, 3] , __UpperCAmelCase=[4, 2, 2, 2] , __UpperCAmelCase=[1, 2, 5, 8] , __UpperCAmelCase=[4, 4, 4, 4] , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=64 , __UpperCAmelCase=10 , __UpperCAmelCase=-1 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Dict = num_channels
lowerCAmelCase__ : List[Any] = num_encoder_blocks
lowerCAmelCase__ : List[str] = depths
lowerCAmelCase__ : Dict = sr_ratios
lowerCAmelCase__ : List[str] = hidden_sizes
lowerCAmelCase__ : Optional[Any] = patch_sizes
lowerCAmelCase__ : List[str] = strides
lowerCAmelCase__ : int = mlp_ratios
lowerCAmelCase__ : List[Any] = num_attention_heads
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : Dict = drop_path_rate
lowerCAmelCase__ : int = layer_norm_eps
lowerCAmelCase__ : str = decoder_hidden_size
lowerCAmelCase__ : Dict = max_depth
lowerCAmelCase__ : List[Any] = head_in_index
| 678 |
lowerCAmelCase_ = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
lowerCAmelCase_ = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float:
lowerCAmelCase__ : List[str] = from_type.lower().strip('''s''' )
lowerCAmelCase__ : List[str] = to_type.lower().strip('''s''' )
lowerCAmelCase__ : str = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Any = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase )
if from_sanitized not in METRIC_CONVERSION:
lowerCAmelCase__ : Tuple = (
F"""Invalid 'from_type' value: {from_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}"""
)
raise ValueError(UpperCamelCase )
if to_sanitized not in METRIC_CONVERSION:
lowerCAmelCase__ : List[Any] = (
F"""Invalid 'to_type' value: {to_type!r}.\n"""
F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}"""
)
raise ValueError(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[from_sanitized]
lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[to_sanitized]
lowerCAmelCase__ : int = 1
if from_exponent > to_exponent:
lowerCAmelCase__ : List[str] = from_exponent - to_exponent
else:
lowerCAmelCase__ : Dict = -(to_exponent - from_exponent)
return value * pow(10 , UpperCamelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 678 | 1 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_A : List[str] = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
_A : Optional[Any] = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
_A : Tuple = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
_A : Optional[int] = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
_A : Optional[Any] = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
_A : Optional[int] = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
_A : List[Any] = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def _a ( ) -> str:
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__ : int = randrange(len(lowerCamelCase_ ) ), randrange(len(lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
lowerCamelCase__ , lowerCamelCase__ : Dict = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def _a ( UpperCAmelCase = 100 ) -> Optional[Any]:
"""simple docstring"""
return (generate_random_hand() for _ in range(lowerCamelCase_ ))
@pytest.mark.parametrize('''hand, expected''' , lowerCamelCase_ )
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
assert PokerHand(lowerCamelCase_ )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , lowerCamelCase_ )
def _a ( UpperCAmelCase , UpperCAmelCase ) -> int:
"""simple docstring"""
assert PokerHand(lowerCamelCase_ )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , lowerCamelCase_ )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = PokerHand(lowerCamelCase_ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , lowerCamelCase_ )
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Dict:
"""simple docstring"""
assert PokerHand(lowerCamelCase_ )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , lowerCamelCase_ )
def _a ( UpperCAmelCase , UpperCAmelCase ) -> str:
"""simple docstring"""
assert PokerHand(lowerCamelCase_ )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , lowerCamelCase_ )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
"""simple docstring"""
assert PokerHand(lowerCamelCase_ ).compare_with(PokerHand(lowerCamelCase_ ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
"""simple docstring"""
assert PokerHand(lowerCamelCase_ ).compare_with(PokerHand(lowerCamelCase_ ) ) == expected
def _a ( ) -> int:
"""simple docstring"""
lowerCamelCase__ : Dict = [PokerHand(lowerCamelCase_ ) for hand in SORTED_HANDS]
lowerCamelCase__ : Dict = poker_hands.copy()
shuffle(lowerCamelCase_ )
lowerCamelCase__ : Any = chain(sorted(lowerCamelCase_ ) )
for index, hand in enumerate(lowerCamelCase_ ):
assert hand == poker_hands[index]
def _a ( ) -> List[str]:
"""simple docstring"""
# Test that five high straights are compared correctly.
lowerCamelCase__ : List[Any] = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
pokerhands.sort(reverse=lowerCamelCase_ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _a ( ) -> Any:
"""simple docstring"""
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
lowerCamelCase__ : Optional[Any] = PokerHand('''2C 4S AS 3D 5C''' )
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : str = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _a ( ) -> Dict:
"""simple docstring"""
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
lowerCamelCase__ : Optional[Any] = 0
lowerCamelCase__ : Any = os.path.abspath(os.path.dirname(lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = os.path.join(lowerCamelCase_ , '''poker_hands.txt''' )
with open(lowerCamelCase_ ) as file_hand:
for line in file_hand:
lowerCamelCase__ : Optional[Any] = line[:14].strip()
lowerCamelCase__ : Optional[Any] = line[15:].strip()
lowerCamelCase__ , lowerCamelCase__ : List[str] = PokerHand(lowerCamelCase_ ), PokerHand(lowerCamelCase_ )
lowerCamelCase__ : Dict = player.compare_with(lowerCamelCase_ )
if output == "Win":
answer += 1
assert answer == 376
| 712 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[Any] = logging.get_logger(__name__)
def _a ( UpperCAmelCase ) -> Tuple:
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
lowerCamelCase__ : Union[str, Any] = MaskFormerConfig(backbone_config=UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
lowerCamelCase__ : int = 847
lowerCamelCase__ : Dict = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
lowerCamelCase__ : Dict = 150
lowerCamelCase__ : Optional[int] = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
lowerCamelCase__ : List[str] = 171
lowerCamelCase__ : Dict = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
lowerCamelCase__ : Dict = 133
lowerCamelCase__ : Tuple = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
lowerCamelCase__ : int = 19
lowerCamelCase__ : Dict = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
lowerCamelCase__ : List[Any] = 65
lowerCamelCase__ : Optional[int] = '''mapillary-vistas-id2label.json'''
lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase__ : List[Any] = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
return config
def _a ( UpperCAmelCase ) -> Any:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", f"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", f"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", f"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", f"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight") )
rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : str = dct.pop(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = val
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Any:
"""simple docstring"""
lowerCamelCase__ : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowerCamelCase__ : Optional[int] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowerCamelCase__ : Any = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
lowerCamelCase__ : Optional[Any] = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : int = in_proj_weight[:dim, :]
lowerCamelCase__ : Optional[Any] = in_proj_bias[: dim]
lowerCamelCase__ : Optional[Any] = in_proj_weight[
dim : dim * 2, :
]
lowerCamelCase__ : str = in_proj_bias[
dim : dim * 2
]
lowerCamelCase__ : Optional[int] = in_proj_weight[
-dim :, :
]
lowerCamelCase__ : Optional[int] = in_proj_bias[-dim :]
# fmt: on
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
# fmt: off
lowerCamelCase__ : Union[str, Any] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
lowerCamelCase__ : List[str] = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
lowerCamelCase__ : str = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : Union[str, Any] = in_proj_weight[: hidden_size, :]
lowerCamelCase__ : str = in_proj_bias[:config.hidden_size]
lowerCamelCase__ : Any = in_proj_weight[hidden_size : hidden_size * 2, :]
lowerCamelCase__ : Tuple = in_proj_bias[hidden_size : hidden_size * 2]
lowerCamelCase__ : Dict = in_proj_weight[-hidden_size :, :]
lowerCamelCase__ : str = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
lowerCamelCase__ : int = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
lowerCamelCase__ : Optional[int] = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : int = in_proj_weight[: hidden_size, :]
lowerCamelCase__ : str = in_proj_bias[:config.hidden_size]
lowerCamelCase__ : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
lowerCamelCase__ : int = in_proj_bias[hidden_size : hidden_size * 2]
lowerCamelCase__ : Any = in_proj_weight[-hidden_size :, :]
lowerCamelCase__ : Optional[Any] = in_proj_bias[-hidden_size :]
# fmt: on
def _a ( ) -> torch.Tensor:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase__ : Optional[int] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ : List[str] = get_maskformer_config(UpperCAmelCase )
# load original state_dict
with open(UpperCAmelCase , '''rb''' ) as f:
lowerCamelCase__ : Tuple = pickle.load(UpperCAmelCase )
lowerCamelCase__ : Tuple = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
lowerCamelCase__ : Tuple = create_rename_keys(UpperCAmelCase )
for src, dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
read_in_swin_q_k_v(UpperCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(UpperCAmelCase , UpperCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
lowerCamelCase__ : Tuple = torch.from_numpy(UpperCAmelCase )
# load 🤗 model
lowerCamelCase__ : Any = MaskFormerForInstanceSegmentation(UpperCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(UpperCAmelCase , param.shape )
lowerCamelCase__ , lowerCamelCase__ : Any = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(UpperCAmelCase ) == 0, f"Unexpected keys: {unexpected_keys}"
# verify results
lowerCamelCase__ : List[str] = prepare_img()
if "vistas" in model_name:
lowerCamelCase__ : Any = 65
elif "cityscapes" in model_name:
lowerCamelCase__ : Optional[Any] = 65535
else:
lowerCamelCase__ : List[Any] = 255
lowerCamelCase__ : int = True if '''ade''' in model_name else False
lowerCamelCase__ : str = MaskFormerImageProcessor(ignore_index=UpperCAmelCase , reduce_labels=UpperCAmelCase )
lowerCamelCase__ : List[str] = image_processor(UpperCAmelCase , return_tensors='''pt''' )
lowerCamelCase__ : Dict = model(**UpperCAmelCase )
print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
lowerCamelCase__ : Any = torch.tensor(
[[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and image processor to {pytorch_dump_folder_path}" )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
image_processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(f"nielsr/{model_name}" )
image_processor.push_to_hub(f"nielsr/{model_name}" )
if __name__ == "__main__":
_A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help=('Name of the MaskFormer model you\'d like to convert',),
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_A : Union[str, Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 130 | 0 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
snake_case = logging.getLogger(__name__)
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=lowercase , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=lowercase , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=lowercase , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=lowercase , default=1000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=lowercase , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=lowercase , type=lowercase , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=lowercase , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=lowercase , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
return args
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
def fn(lowercase ):
return tokenizer(examples["text"] )
return fn
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = []
for i in range(len(tokenized_data["input_ids"] ) ):
SCREAMING_SNAKE_CASE : Any = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
SCREAMING_SNAKE_CASE : str = tf.train.Features(feature=lowercase )
SCREAMING_SNAKE_CASE : List[Any] = tf.train.Example(features=lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = example.SerializeToString()
records.append(lowercase )
return records
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = min(len(lowercase ) , args.limit )
SCREAMING_SNAKE_CASE : List[str] = dataset.select(range(lowercase ) )
print(F'''Limiting the dataset to {args.limit} entries.''' )
SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowercase ):
os.makedirs(lowercase )
else:
SCREAMING_SNAKE_CASE : List[str] = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenize_function(lowercase )
SCREAMING_SNAKE_CASE : List[Any] = dataset.map(lowercase , batched=lowercase , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowercase ):
# Concatenate all texts.
SCREAMING_SNAKE_CASE : Union[str, Any] = {k: sum(examples[k] , [] ) for k in examples.keys()}
SCREAMING_SNAKE_CASE : List[Any] = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
SCREAMING_SNAKE_CASE : List[Any] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
SCREAMING_SNAKE_CASE : List[Any] = {
k: [t[i : i + args.max_length] for i in range(0 , lowercase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
SCREAMING_SNAKE_CASE : Tuple = dataset_tokenized.map(lowercase , batched=lowercase , batch_size=1000 , num_proc=4 )
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Optional[int] = 0
for shard in range(0 , len(lowercase ) , args.shard_size ):
SCREAMING_SNAKE_CASE : int = grouped_dataset[shard : shard + args.shard_size]
SCREAMING_SNAKE_CASE : Dict = len(dataset_snapshot["input_ids"] )
SCREAMING_SNAKE_CASE : Any = os.path.join(lowercase , F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
SCREAMING_SNAKE_CASE : Any = get_serialized_examples(lowercase )
with tf.io.TFRecordWriter(lowercase ) as out_file:
for i in range(len(lowercase ) ):
SCREAMING_SNAKE_CASE : List[Any] = serialized_examples[i]
out_file.write(lowercase )
print("Wrote file {} containing {} records".format(lowercase , lowercase ) )
shard_count += 1
total_records += records_containing
with open(F'''split-{args.split}-records-count.txt''' , "w" ) as f:
print(F'''Total {args.split} records: {total_records}''' , file=lowercase )
if __name__ == "__main__":
snake_case = parse_args()
main(args)
| 62 | '''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __lowerCAmelCase ( a_ ) -> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = int(number**0.5 )
return number == sq * sq
def __lowerCAmelCase ( a_ , a_ , a_ , a_ , a_ , a_ ) -> tuple[int, int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
SCREAMING_SNAKE_CASE : int = x_den * y_den * z_den
SCREAMING_SNAKE_CASE : int = gcd(a_ , a_ )
top //= hcf
bottom //= hcf
return top, bottom
def __lowerCAmelCase ( a_ = 35 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : set = set()
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : Fraction = Fraction(0 )
SCREAMING_SNAKE_CASE : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
SCREAMING_SNAKE_CASE : Optional[Any] = x_num * y_den + x_den * y_num
SCREAMING_SNAKE_CASE : Optional[int] = x_den * y_den
SCREAMING_SNAKE_CASE : str = gcd(a_ , a_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE : List[Any] = add_three(
a_ , a_ , a_ , a_ , a_ , a_ )
unique_s.add(a_ )
# n=2
SCREAMING_SNAKE_CASE : List[str] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
SCREAMING_SNAKE_CASE : Optional[int] = x_den * x_den * y_den * y_den
if is_sq(a_ ) and is_sq(a_ ):
SCREAMING_SNAKE_CASE : Dict = int(sqrt(a_ ) )
SCREAMING_SNAKE_CASE : int = int(sqrt(a_ ) )
SCREAMING_SNAKE_CASE : Any = gcd(a_ , a_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE : Dict = add_three(
a_ , a_ , a_ , a_ , a_ , a_ )
unique_s.add(a_ )
# n=-1
SCREAMING_SNAKE_CASE : Any = x_num * y_num
SCREAMING_SNAKE_CASE : List[str] = x_den * y_num + x_num * y_den
SCREAMING_SNAKE_CASE : List[Any] = gcd(a_ , a_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE : Optional[int] = add_three(
a_ , a_ , a_ , a_ , a_ , a_ )
unique_s.add(a_ )
# n=2
SCREAMING_SNAKE_CASE : Any = x_num * x_num * y_num * y_num
SCREAMING_SNAKE_CASE : Union[str, Any] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(a_ ) and is_sq(a_ ):
SCREAMING_SNAKE_CASE : str = int(sqrt(a_ ) )
SCREAMING_SNAKE_CASE : int = int(sqrt(a_ ) )
SCREAMING_SNAKE_CASE : Dict = gcd(a_ , a_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE : Optional[int] = add_three(
a_ , a_ , a_ , a_ , a_ , a_ )
unique_s.add(a_ )
for num, den in unique_s:
total += Fraction(a_ , a_ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 251 | 0 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__snake_case : Optional[int] = logging.getLogger(__name__)
__snake_case : Optional[int] = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__snake_case : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __SCREAMING_SNAKE_CASE :
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_lowerCAmelCase , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_lowerCAmelCase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(_lowerCAmelCase)} , )
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_lowerCAmelCase , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''})
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''})
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_SCREAMING_SNAKE_CASE : bool = field(
default=_lowerCAmelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
_SCREAMING_SNAKE_CASE : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
_SCREAMING_SNAKE_CASE : bool = field(
default=_lowerCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class __SCREAMING_SNAKE_CASE :
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_lowerCAmelCase , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''})
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''})
_SCREAMING_SNAKE_CASE : Optional[str] = field(default=_lowerCAmelCase , metadata={'''help''': '''The input training data file (a text file).'''})
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_lowerCAmelCase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_lowerCAmelCase , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_lowerCAmelCase , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
_SCREAMING_SNAKE_CASE : bool = field(
default=_lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''})
_SCREAMING_SNAKE_CASE : Optional[int] = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
_SCREAMING_SNAKE_CASE : Optional[int] = field(
default=_lowerCAmelCase , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
_SCREAMING_SNAKE_CASE : Optional[int] = field(
default=_lowerCAmelCase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
_SCREAMING_SNAKE_CASE : float = field(
default=0.1_5 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''})
_SCREAMING_SNAKE_CASE : bool = field(
default=_lowerCAmelCase , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.train_file is not None:
lowerCAmelCase__ = self.train_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
lowerCAmelCase__ = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def _UpperCamelCase ( UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(UpperCamelCase_ , 'r' , encoding='utf-8' ) as f:
lowerCAmelCase__ = [json.loads(UpperCamelCase_ ) for line in f.read().splitlines() if (len(UpperCamelCase_ ) > 0 and not line.isspace())]
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )
lowerCAmelCase__ = {c: dataset[c] for c in dataset.column_names}
lowerCAmelCase__ = refs
return Dataset.from_dict(UpperCamelCase_ )
def _UpperCamelCase ( ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowerCAmelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , UpperCamelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCAmelCase__ = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
lowerCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
lowerCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
lowerCAmelCase__ = {}
if data_args.train_file is not None:
lowerCAmelCase__ = data_args.train_file
if data_args.validation_file is not None:
lowerCAmelCase__ = data_args.validation_file
lowerCAmelCase__ = data_args.train_file.split('.' )[-1]
if extension == "txt":
lowerCAmelCase__ = 'text'
lowerCAmelCase__ = load_dataset(UpperCamelCase_ , data_files=UpperCamelCase_ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.config_name , **UpperCamelCase_ )
elif model_args.model_name_or_path:
lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCamelCase_ )
else:
lowerCAmelCase__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
lowerCAmelCase__ = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
lowerCAmelCase__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **UpperCamelCase_ )
elif model_args.model_name_or_path:
lowerCAmelCase__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **UpperCamelCase_ )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
lowerCAmelCase__ = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
lowerCAmelCase__ = AutoModelForMaskedLM.from_config(UpperCamelCase_ )
model.resize_token_embeddings(len(UpperCamelCase_ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
lowerCAmelCase__ = datasets['train'].column_names
else:
lowerCAmelCase__ = datasets['validation'].column_names
lowerCAmelCase__ = 'text' if 'text' in column_names else column_names[0]
lowerCAmelCase__ = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(UpperCamelCase_ : Optional[Any] ):
# Remove empty lines
lowerCAmelCase__ = [line for line in examples['text'] if len(UpperCamelCase_ ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=data_args.max_seq_length )
lowerCAmelCase__ = datasets.map(
UpperCamelCase_ , batched=UpperCamelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
lowerCAmelCase__ = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
lowerCAmelCase__ = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
lowerCAmelCase__ = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
lowerCAmelCase__ = False
# Data collator
# This one will take care of randomly masking the tokens.
lowerCAmelCase__ = DataCollatorForWholeWordMask(tokenizer=UpperCamelCase_ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowerCAmelCase__ = Trainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=UpperCamelCase_ , data_collator=UpperCamelCase_ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowerCAmelCase__ = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
lowerCAmelCase__ = model_args.model_name_or_path
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = trainer.train(resume_from_checkpoint=UpperCamelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCAmelCase__ = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(UpperCamelCase_ , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
lowerCAmelCase__ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCAmelCase__ = trainer.evaluate()
lowerCAmelCase__ = math.exp(eval_output['eval_loss'] )
lowerCAmelCase__ = perplexity
lowerCAmelCase__ = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(UpperCamelCase_ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def _UpperCamelCase ( UpperCamelCase_ : str ) -> int:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 709 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
__snake_case : Any = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
_SCREAMING_SNAKE_CASE : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''})
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''})
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''})
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_SCREAMING_SNAKE_CASE : bool = field(
default=__lowercase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
_SCREAMING_SNAKE_CASE : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
_SCREAMING_SNAKE_CASE : bool = field(
default=__lowercase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
_SCREAMING_SNAKE_CASE : Optional[str] = field(default=__lowercase , metadata={'''help''': '''The input training data file (a text file).'''})
_SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
_SCREAMING_SNAKE_CASE : bool = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''})
_SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__lowercase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
_SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_SCREAMING_SNAKE_CASE : bool = field(
default=__lowercase , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
_SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
_SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.train_file is not None:
lowerCAmelCase__ = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
lowerCAmelCase__ = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __SCREAMING_SNAKE_CASE :
_SCREAMING_SNAKE_CASE : PreTrainedTokenizerBase
_SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = True
_SCREAMING_SNAKE_CASE : Optional[int] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
def __call__( self , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = 'label' if 'label' in features[0].keys() else 'labels'
lowerCAmelCase__ = [feature.pop(_UpperCamelCase ) for feature in features]
lowerCAmelCase__ = len(_UpperCamelCase )
lowerCAmelCase__ = len(features[0]['input_ids'] )
lowerCAmelCase__ = [
[{k: v[i] for k, v in feature.items()} for i in range(_UpperCamelCase )] for feature in features
]
lowerCAmelCase__ = list(chain(*_UpperCamelCase ) )
lowerCAmelCase__ = self.tokenizer.pad(
_UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
lowerCAmelCase__ = {k: v.view(_UpperCamelCase , _UpperCamelCase , -1 ) for k, v in batch.items()}
# Add back labels
lowerCAmelCase__ = torch.tensor(_UpperCamelCase , dtype=torch.intaa )
return batch
def _UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , UpperCamelCase_ , UpperCamelCase_ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase__ = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase_ )
datasets.utils.logging.set_verbosity(UpperCamelCase_ )
transformers.utils.logging.set_verbosity(UpperCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCAmelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCAmelCase__ = {}
if data_args.train_file is not None:
lowerCAmelCase__ = data_args.train_file
if data_args.validation_file is not None:
lowerCAmelCase__ = data_args.validation_file
lowerCAmelCase__ = data_args.train_file.split('.' )[-1]
lowerCAmelCase__ = load_dataset(
UpperCamelCase_ , data_files=UpperCamelCase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowerCAmelCase__ = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCAmelCase__ = [F"ending{i}" for i in range(4 )]
lowerCAmelCase__ = 'sent1'
lowerCAmelCase__ = 'sent2'
if data_args.max_seq_length is None:
lowerCAmelCase__ = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
lowerCAmelCase__ = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCAmelCase__ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(UpperCamelCase_ : Tuple ):
lowerCAmelCase__ = [[context] * 4 for context in examples[context_name]]
lowerCAmelCase__ = examples[question_header_name]
lowerCAmelCase__ = [
[F"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(UpperCamelCase_ )
]
# Flatten out
lowerCAmelCase__ = list(chain(*UpperCamelCase_ ) )
lowerCAmelCase__ = list(chain(*UpperCamelCase_ ) )
# Tokenize
lowerCAmelCase__ = tokenizer(
UpperCamelCase_ , UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
lowerCAmelCase__ = raw_datasets['train']
if data_args.max_train_samples is not None:
lowerCAmelCase__ = min(len(UpperCamelCase_ ) , data_args.max_train_samples )
lowerCAmelCase__ = train_dataset.select(range(UpperCamelCase_ ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
lowerCAmelCase__ = train_dataset.map(
UpperCamelCase_ , batched=UpperCamelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
lowerCAmelCase__ = raw_datasets['validation']
if data_args.max_eval_samples is not None:
lowerCAmelCase__ = min(len(UpperCamelCase_ ) , data_args.max_eval_samples )
lowerCAmelCase__ = eval_dataset.select(range(UpperCamelCase_ ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
lowerCAmelCase__ = eval_dataset.map(
UpperCamelCase_ , batched=UpperCamelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
lowerCAmelCase__ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(UpperCamelCase_ : Union[str, Any] ):
lowerCAmelCase__ , lowerCAmelCase__ = eval_predictions
lowerCAmelCase__ = np.argmax(UpperCamelCase_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
lowerCAmelCase__ = Trainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase_ , data_collator=UpperCamelCase_ , compute_metrics=UpperCamelCase_ , )
# Training
if training_args.do_train:
lowerCAmelCase__ = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase__ = last_checkpoint
lowerCAmelCase__ = trainer.train(resume_from_checkpoint=UpperCamelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCAmelCase__ = train_result.metrics
lowerCAmelCase__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase_ )
)
lowerCAmelCase__ = min(UpperCamelCase_ , len(UpperCamelCase_ ) )
trainer.log_metrics('train' , UpperCamelCase_ )
trainer.save_metrics('train' , UpperCamelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCAmelCase__ = trainer.evaluate()
lowerCAmelCase__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase_ )
lowerCAmelCase__ = min(UpperCamelCase_ , len(UpperCamelCase_ ) )
trainer.log_metrics('eval' , UpperCamelCase_ )
trainer.save_metrics('eval' , UpperCamelCase_ )
lowerCAmelCase__ = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase_ )
else:
trainer.create_model_card(**UpperCamelCase_ )
def _UpperCamelCase ( UpperCamelCase_ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 365 | 0 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
_a = field(
metadata={"help": "The output directory where the model will be written."} , )
_a = field(
metadata={
"help": (
"The encoder model checkpoint for weights initialization."
"Don\'t set if you want to train an encoder model from scratch."
)
} , )
_a = field(
metadata={
"help": (
"The decoder model checkpoint for weights initialization."
"Don\'t set if you want to train a decoder model from scratch."
)
} , )
_a = field(
default=a__ , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
_a = field(
default=a__ , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def __lowerCamelCase ( ) -> List[str]:
lowerCamelCase_ : Any = HfArgumentParser((ModelArguments,) )
((lowerCamelCase_ ), ) : List[str] = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
lowerCamelCase_ : List[str] = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
lowerCamelCase_ : str = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
lowerCamelCase_ : List[str] = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
lowerCamelCase_ : List[Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
lowerCamelCase_ : Optional[int] = True
lowerCamelCase_ : List[str] = True
lowerCamelCase_ : Any = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=a__ , decoder_config=a__ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
lowerCamelCase_ : int = decoder_config.decoder_start_token_id
lowerCamelCase_ : List[str] = decoder_config.pad_token_id
if decoder_start_token_id is None:
lowerCamelCase_ : List[str] = decoder_config.bos_token_id
if pad_token_id is None:
lowerCamelCase_ : Tuple = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
lowerCamelCase_ : List[Any] = decoder_config.eos_token_id
lowerCamelCase_ : Dict = decoder_start_token_id
lowerCamelCase_ : Optional[int] = pad_token_id
lowerCamelCase_ : int = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
lowerCamelCase_ : List[Any] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
lowerCamelCase_ : List[Any] = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
| 278 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
A : Dict = logging.get_logger(__name__)
A : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> int:
for attribute in key.split('''.''' ):
__a = getattr(a__ , a__ )
if weight_type is not None:
__a = getattr(a__ , a__ ).shape
else:
__a = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
else:
__a = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Optional[int]:
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == '''group''' , )
__a = True
else:
for key, mapped_key in MAPPING.items():
__a = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
__a = True
if "*" in mapped_key:
__a = name.split(a__ )[0].split('''.''' )[-2]
__a = mapped_key.replace('''*''' , a__ )
if "weight_g" in name:
__a = '''weight_g'''
elif "weight_v" in name:
__a = '''weight_v'''
elif "weight" in name:
__a = '''weight'''
elif "bias" in name:
__a = '''bias'''
else:
__a = None
set_recursively(a__ , a__ , a__ , a__ , a__ )
continue
if not is_used:
unused_weights.append(a__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> Optional[int]:
__a = full_name.split('''conv_layers.''' )[-1]
__a = name.split('''.''' )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__a = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__a = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__a = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__a = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(a__ )
@torch.no_grad()
def __lowerCAmelCase ( a__ , a__ , a__=None , a__=None , a__=True ) -> Optional[Any]:
if config_path is not None:
__a = HubertConfig.from_pretrained(a__ )
else:
__a = HubertConfig()
if is_finetuned:
if dict_path:
__a = Dictionary.load(a__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a = target_dict.pad_index
__a = target_dict.bos_index
__a = target_dict.eos_index
__a = len(target_dict.symbols )
__a = os.path.join(a__ , '''vocab.json''' )
if not os.path.isdir(a__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(a__ ) )
return
os.makedirs(a__ , exist_ok=a__ )
with open(a__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , a__ )
__a = WavaVecaCTCTokenizer(
a__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=a__ , )
__a = True if config.feat_extract_norm == '''layer''' else False
__a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=a__ , return_attention_mask=a__ , )
__a = WavaVecaProcessor(feature_extractor=a__ , tokenizer=a__ )
processor.save_pretrained(a__ )
__a = HubertForCTC(a__ )
else:
__a = HubertModel(a__ )
if is_finetuned:
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
__a = model[0].eval()
recursively_load_weights(a__ , a__ , a__ )
hf_wavavec.save_pretrained(a__ )
if __name__ == "__main__":
A : Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
A : str = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 219 | 0 |
'''simple docstring'''
def lowerCAmelCase_ ( _lowerCamelCase: list[int] ):
if not numbers:
return 0
if not isinstance(_lowerCamelCase , (list, tuple) ) or not all(
isinstance(_lowerCamelCase , _lowerCamelCase ) for number in numbers ):
raise ValueError("""numbers must be an iterable of integers""" )
__SCREAMING_SNAKE_CASE : Any = numbers[0]
for i in range(1 , len(_lowerCamelCase ) ):
# update the maximum and minimum subarray products
__SCREAMING_SNAKE_CASE : Dict = numbers[i]
if number < 0:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = min_till_now, max_till_now
__SCREAMING_SNAKE_CASE : str = max(_lowerCamelCase , max_till_now * number )
__SCREAMING_SNAKE_CASE : Tuple = min(_lowerCamelCase , min_till_now * number )
# update the maximum product found till now
__SCREAMING_SNAKE_CASE : Dict = max(_lowerCamelCase , _lowerCamelCase )
return max_prod | 178 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
UpperCamelCase__ : Optional[Any] = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
UpperCamelCase__ : List[str] = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
UpperCamelCase__ : Dict = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Optional[int]="uniform_average" , lowerCAmelCase__ : int=True ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = mean_squared_error(
lowerCAmelCase__ , lowerCAmelCase__ , sample_weight=lowerCAmelCase__ , multioutput=lowerCAmelCase__ , squared=lowerCAmelCase__ )
return {"mse": mse} | 178 | 1 |
"""simple docstring"""
import re
def lowerCamelCase__ ( UpperCAmelCase_ )-> list:
"""simple docstring"""
return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )]
def lowerCamelCase__ ( UpperCAmelCase_ )-> str:
"""simple docstring"""
UpperCamelCase = split_input(str_ )
return "".join(
["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> str:
"""simple docstring"""
try:
UpperCamelCase = split_input(_snake_case )
if upper:
UpperCamelCase = "".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
UpperCamelCase = "".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def lowerCamelCase__ ( UpperCAmelCase_ )-> str:
"""simple docstring"""
return to_simple_case(_snake_case )
def lowerCamelCase__ ( UpperCAmelCase_ )-> str:
"""simple docstring"""
try:
UpperCamelCase = to_simple_case(_snake_case )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ )-> str:
"""simple docstring"""
return to_complex_case(_snake_case , _snake_case , "_" )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ )-> str:
"""simple docstring"""
return to_complex_case(_snake_case , _snake_case , "-" )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 554 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case :
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=True , _a=False , _a=False , _a=False , _a=2 , _a=99 , _a=0 , _a=32 , _a=5 , _a=4 , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=2 , _a=4 , _a="last" , _a=True , _a=None , _a=0 , ):
__magic_name__ : List[Any] = parent
__magic_name__ : Optional[int] = batch_size
__magic_name__ : Optional[Any] = seq_length
__magic_name__ : Tuple = is_training
__magic_name__ : str = use_input_lengths
__magic_name__ : List[str] = use_token_type_ids
__magic_name__ : Optional[Any] = use_labels
__magic_name__ : Union[str, Any] = gelu_activation
__magic_name__ : Optional[int] = sinusoidal_embeddings
__magic_name__ : Tuple = causal
__magic_name__ : Tuple = asm
__magic_name__ : Dict = n_langs
__magic_name__ : Tuple = vocab_size
__magic_name__ : Tuple = n_special
__magic_name__ : Optional[Any] = hidden_size
__magic_name__ : List[str] = num_hidden_layers
__magic_name__ : Dict = num_attention_heads
__magic_name__ : Tuple = hidden_dropout_prob
__magic_name__ : Tuple = attention_probs_dropout_prob
__magic_name__ : Any = max_position_embeddings
__magic_name__ : str = type_sequence_label_size
__magic_name__ : int = initializer_range
__magic_name__ : Optional[int] = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : int = summary_type
__magic_name__ : int = use_proj
__magic_name__ : Any = scope
__magic_name__ : Optional[Any] = bos_token_id
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : List[str] = None
if self.use_input_lengths:
__magic_name__ : Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__magic_name__ : Optional[int] = None
if self.use_token_type_ids:
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__magic_name__ : Dict = None
__magic_name__ : Union[str, Any] = None
__magic_name__ : int = None
if self.use_labels:
__magic_name__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : str = ids_tensor([self.batch_size] , 2 ).float()
__magic_name__ : int = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Optional[int] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE ( self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
__magic_name__ : int = XLMModel(config=_a )
model.to(_a )
model.eval()
__magic_name__ : Any = model(_a , lengths=_a , langs=_a )
__magic_name__ : Any = model(_a , langs=_a )
__magic_name__ : str = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
__magic_name__ : Union[str, Any] = XLMWithLMHeadModel(_a )
model.to(_a )
model.eval()
__magic_name__ : str = model(_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
__magic_name__ : Optional[Any] = XLMForQuestionAnsweringSimple(_a )
model.to(_a )
model.eval()
__magic_name__ : int = model(_a )
__magic_name__ : Optional[int] = model(_a , start_positions=_a , end_positions=_a )
__magic_name__ : List[str] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
__magic_name__ : Tuple = XLMForQuestionAnswering(_a )
model.to(_a )
model.eval()
__magic_name__ : List[Any] = model(_a )
__magic_name__ : Union[str, Any] = model(
_a , start_positions=_a , end_positions=_a , cls_index=_a , is_impossible=_a , p_mask=_a , )
__magic_name__ : str = model(
_a , start_positions=_a , end_positions=_a , cls_index=_a , is_impossible=_a , )
((__magic_name__) , ) : Optional[Any] = result_with_labels.to_tuple()
__magic_name__ : int = model(_a , start_positions=_a , end_positions=_a )
((__magic_name__) , ) : Optional[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
__magic_name__ : List[str] = XLMForSequenceClassification(_a )
model.to(_a )
model.eval()
__magic_name__ : Union[str, Any] = model(_a )
__magic_name__ : Union[str, Any] = model(_a , labels=_a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
__magic_name__ : Optional[int] = self.num_labels
__magic_name__ : Union[str, Any] = XLMForTokenClassification(_a )
model.to(_a )
model.eval()
__magic_name__ : List[Any] = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
__magic_name__ : List[Any] = self.num_choices
__magic_name__ : Optional[int] = XLMForMultipleChoice(config=_a )
model.to(_a )
model.eval()
__magic_name__ : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ : List[str] = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Tuple = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) : Any = config_and_inputs
__magic_name__ : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ):
UpperCamelCase__ = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase__ = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a=False ):
__magic_name__ : Union[str, Any] = super()._prepare_for_class(_a , _a , return_labels=_a )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__magic_name__ : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_a )
__magic_name__ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_a )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Any = XLMModelTester(self )
__magic_name__ : str = ConfigTester(self , config_class=_a , emb_dim=37 )
def SCREAMING_SNAKE_CASE ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_a )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a=False , _a=1 ):
self.assertIsInstance(_a , _a )
self.assertListEqual(
[isinstance(_a , _a ) for iter_attentions in attentions] , [True] * len(_a ) )
self.assertEqual(len(_a ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_a ):
# adds PAD dummy token
__magic_name__ : str = min_length + idx + 1
__magic_name__ : Dict = min_length + idx + 1
__magic_name__ : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_a ) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a=False , _a=1 ):
self.assertIsInstance(_a , _a )
self.assertListEqual(
[isinstance(_a , _a ) for iter_hidden_states in hidden_states] , [True] * len(_a ) , )
self.assertEqual(len(_a ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_a ):
# adds PAD dummy token
__magic_name__ : str = min_length + idx + 1
__magic_name__ : List[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_a ) , )
pass
@slow
def SCREAMING_SNAKE_CASE ( self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Any = XLMModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class _snake_case ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Tuple = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
model.to(_a )
__magic_name__ : Optional[int] = torch.tensor([[14, 447]] , dtype=torch.long , device=_a ) # the president
__magic_name__ : Union[str, Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__magic_name__ : Optional[int] = model.generate(_a , do_sample=_a )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _a )
| 124 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _snake_case :
_lowercase : List[str] = PegasusConfig
_lowercase : List[str] = {}
_lowercase : List[str] = '''gelu'''
def __init__( self , a , a=13 , a=7 , a=True , a=False , a=99 , a=32 , a=2 , a=4 , a=37 , a=0.1 , a=0.1 , a=40 , a=2 , a=1 , a=0 , ) -> Any:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = eos_token_id
SCREAMING_SNAKE_CASE = pad_token_id
SCREAMING_SNAKE_CASE = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE = prepare_pegasus_inputs_dict(a , a , a)
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> str:
SCREAMING_SNAKE_CASE = TFPegasusModel(config=a).get_decoder()
SCREAMING_SNAKE_CASE = inputs_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids[:1, :]
SCREAMING_SNAKE_CASE = inputs_dict['attention_mask'][:1, :]
SCREAMING_SNAKE_CASE = inputs_dict['head_mask']
SCREAMING_SNAKE_CASE = 1
# first forward pass
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , head_mask=a , use_cache=a)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size)
SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
# append to next input_ids and
SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens] , axis=-1)
SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask] , axis=-1)
SCREAMING_SNAKE_CASE = model(a , attention_mask=a)[0]
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , past_key_values=a)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
SCREAMING_SNAKE_CASE = int(ids_tensor((1,) , output_from_past.shape[-1]))
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(a , a , rtol=1E-3)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
if attention_mask is None:
SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(_UpperCAmelCase , config.pad_token_id) , tf.inta)
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id) , tf.inta),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _snake_case ( A__ , A__ , unittest.TestCase ):
_lowercase : List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
_lowercase : Dict = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
_lowercase : List[Any] = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowercase : Any = True
_lowercase : List[str] = False
_lowercase : Optional[int] = False
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = TFPegasusModelTester(self)
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a)
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*a)
@require_sentencepiece
@require_tokenizers
@require_tf
class _snake_case ( unittest.TestCase ):
_lowercase : Optional[int] = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
_lowercase : Optional[Any] = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
_lowercase : List[Any] = '''google/pegasus-xsum'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
def SCREAMING_SNAKE_CASE__ ( self , **a) -> Tuple:
SCREAMING_SNAKE_CASE = self.translate_src_text(**a)
assert self.expected_text == generated_words
def SCREAMING_SNAKE_CASE__ ( self , **a) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , **a , padding=a , return_tensors='tf')
SCREAMING_SNAKE_CASE = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=a , )
SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=a)
return generated_words
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
self._assert_generated_batch_equal_expected()
| 444 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class _snake_case :
def __init__( self , a , a=99 , a=13 , a=7 , a=9 , a=True , a=True , a=False , a=32 , a=5 , a=4 , a=37 , a=8 , a=0.1 , a=0.0_02 , a=1 , a=0 , a=0 , a=None , a=None , ) -> List[str]:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = encoder_seq_length
SCREAMING_SNAKE_CASE = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE = self.decoder_seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_attention_mask
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = d_ff
SCREAMING_SNAKE_CASE = relative_attention_num_buckets
SCREAMING_SNAKE_CASE = dropout_rate
SCREAMING_SNAKE_CASE = initializer_factor
SCREAMING_SNAKE_CASE = eos_token_id
SCREAMING_SNAKE_CASE = pad_token_id
SCREAMING_SNAKE_CASE = decoder_start_token_id
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = decoder_layers
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
return TaConfig.from_pretrained('google/umt5-base')
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a=None , a=None , a=None , a=None , a=None , ) -> Optional[int]:
if attention_mask is None:
SCREAMING_SNAKE_CASE = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE = decoder_input_ids.ne(config.pad_token_id)
if head_mask is None:
SCREAMING_SNAKE_CASE = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=a)
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=a)
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=a)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
SCREAMING_SNAKE_CASE = input_ids.clamp(self.pad_token_id + 1)
SCREAMING_SNAKE_CASE = decoder_input_ids.clamp(self.pad_token_id + 1)
SCREAMING_SNAKE_CASE = self.get_config()
SCREAMING_SNAKE_CASE = config.num_attention_heads
SCREAMING_SNAKE_CASE = self.prepare_inputs_dict(a , a , a)
return config, input_dict
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , ) -> Dict:
SCREAMING_SNAKE_CASE = UMTaModel(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(
input_ids=a , decoder_input_ids=a , attention_mask=a , decoder_attention_mask=a , )
SCREAMING_SNAKE_CASE = model(input_ids=a , decoder_input_ids=a)
SCREAMING_SNAKE_CASE = result.last_hidden_state
SCREAMING_SNAKE_CASE = result.past_key_values
SCREAMING_SNAKE_CASE = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size))
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size))
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(a) , config.num_layers)
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0]) , 4)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , ) -> Optional[int]:
SCREAMING_SNAKE_CASE = UMTaModel(config=a).get_decoder().to(a).eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(a , use_cache=a)
SCREAMING_SNAKE_CASE = model(a)
SCREAMING_SNAKE_CASE = model(a , use_cache=a)
self.parent.assertTrue(len(a) == len(a))
self.parent.assertTrue(len(a) == len(a) + 1)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size)
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1)
SCREAMING_SNAKE_CASE = model(a)['last_hidden_state']
SCREAMING_SNAKE_CASE = model(a , past_key_values=a)['last_hidden_state']
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1]).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3))
def SCREAMING_SNAKE_CASE__ ( self , a , a , ) -> str:
SCREAMING_SNAKE_CASE = UMTaModel(config=a).to(a).half().eval()
SCREAMING_SNAKE_CASE = model(**a)['last_hidden_state']
self.parent.assertFalse(torch.isnan(a).any().item())
@require_torch
class _snake_case ( A__ , A__ , A__ , unittest.TestCase ):
_lowercase : Any = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_lowercase : str = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_lowercase : Tuple = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_lowercase : int = True
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : int = True
_lowercase : Any = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_lowercase : int = [0.8, 0.9]
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = UMTaModelTester(self)
@unittest.skip('Test has a segmentation fault on torch 1.8.0')
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = UMTaModel(config_and_inputs[0]).to(a)
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
a , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=a , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision')
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = config_and_inputs[0]
SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration(a).eval()
model.to(a)
SCREAMING_SNAKE_CASE = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=a),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=a),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=a),
}
for attn_name, (name, mask) in zip(a , head_masking.items()):
SCREAMING_SNAKE_CASE = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers , config.num_heads , device=a)
SCREAMING_SNAKE_CASE = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=a , return_dict_in_generate=a , **a , )
# We check the state of decoder_attentions and cross_attentions just from the last step
SCREAMING_SNAKE_CASE = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights]) , 0.0)
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.')
def SCREAMING_SNAKE_CASE__ ( self) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged')
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=a).to(a)
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=a , legacy=a)
SCREAMING_SNAKE_CASE = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
SCREAMING_SNAKE_CASE = tokenizer(a , return_tensors='pt' , padding=a).input_ids
# fmt: off
SCREAMING_SNAKE_CASE = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
])
# fmt: on
torch.testing.assert_allclose(a , a)
SCREAMING_SNAKE_CASE = model.generate(input_ids.to(a))
SCREAMING_SNAKE_CASE = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(a)
self.assertEqual(a , a)
| 444 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
__magic_name__ = None
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__magic_name__ = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
__magic_name__ = {
'facebook/nllb-large-en-ro': 1_024,
'facebook/nllb-200-distilled-600M': 1_024,
}
# fmt: off
__magic_name__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = ["""input_ids""", """attention_mask"""]
a_ = NllbTokenizer
a_ = []
a_ = []
def __init__( self : Any ,_a : Dict=None ,_a : Optional[Any]=None ,_a : List[str]="<s>" ,_a : Union[str, Any]="</s>" ,_a : List[Any]="</s>" ,_a : int="<s>" ,_a : List[Any]="<unk>" ,_a : str="<pad>" ,_a : Tuple="<mask>" ,_a : List[str]=None ,_a : str=None ,_a : List[Any]=None ,_a : Dict=False ,**_a : int ,):
'''simple docstring'''
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
A_ : Any = legacy_behaviour
super().__init__(
vocab_file=_a ,tokenizer_file=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,src_lang=_a ,tgt_lang=_a ,additional_special_tokens=_a ,legacy_behaviour=_a ,**_a ,)
A_ : List[str] = vocab_file
A_ : Any = False if not self.vocab_file else True
A_ : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
A_ : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
A_ : Optional[int] = src_lang if src_lang is not None else """eng_Latn"""
A_ : Optional[int] = self.convert_tokens_to_ids(self._src_lang )
A_ : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _a ( self : Optional[Any] ,_a : str ):
'''simple docstring'''
A_ : Union[str, Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Dict = [self.sep_token_id]
A_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : str ,_a : Union[str, Any] ,_a : str ,_a : Optional[str] ,_a : Optional[str] ,**_a : Optional[Any] ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
A_ : Optional[Any] = src_lang
A_ : Optional[int] = self(_a ,add_special_tokens=_a ,return_tensors=_a ,**_a )
A_ : Union[str, Any] = self.convert_tokens_to_ids(_a )
A_ : Dict = tgt_lang_id
return inputs
def _a ( self : Tuple ,_a : List[str] ,_a : str = "eng_Latn" ,_a : Optional[List[str]] = None ,_a : str = "fra_Latn" ,**_a : Tuple ,):
'''simple docstring'''
A_ : List[Any] = src_lang
A_ : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(_a ,_a ,**_a )
def _a ( self : Any ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _a ( self : int ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self : Tuple ,_a : int ):
'''simple docstring'''
A_ : Optional[Any] = self.convert_tokens_to_ids(_a )
if self.legacy_behaviour:
A_ : Dict = []
A_ : int = [self.eos_token_id, self.cur_lang_code]
else:
A_ : Tuple = [self.cur_lang_code]
A_ : List[str] = [self.eos_token_id]
A_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
A_ : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
A_ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str ,pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def _a ( self : List[str] ,_a : str ):
'''simple docstring'''
A_ : Any = self.convert_tokens_to_ids(_a )
if self.legacy_behaviour:
A_ : List[Any] = []
A_ : Tuple = [self.eos_token_id, self.cur_lang_code]
else:
A_ : Union[str, Any] = [self.cur_lang_code]
A_ : Tuple = [self.eos_token_id]
A_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
A_ : Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens )
A_ : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str ,pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def _a ( self : List[str] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
A_ : Optional[Any] = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file ,_a )
return (out_vocab_file,)
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__magic_name__ = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = BartTokenizer
def __init__( self : str ,_a : Any=None ,_a : Optional[int]=None ,_a : int=None ,_a : Optional[int]="replace" ,_a : Dict="<s>" ,_a : Optional[Any]="</s>" ,_a : Dict="</s>" ,_a : Tuple="<s>" ,_a : Optional[Any]="<unk>" ,_a : List[str]="<pad>" ,_a : int="<mask>" ,_a : str=False ,_a : List[str]=True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(
_a ,_a ,tokenizer_file=_a ,errors=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,trim_offsets=_a ,**_a ,)
A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : List[str] = getattr(_a ,pre_tok_state.pop("""type""" ) )
A_ : Optional[int] = add_prefix_space
A_ : int = pre_tok_class(**_a )
A_ : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : str = """post_processor"""
A_ : List[Any] = getattr(self.backend_tokenizer ,_a ,_a )
if tokenizer_component_instance:
A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ : Tuple = tuple(state["""sep"""] )
if "cls" in state:
A_ : Tuple = tuple(state["""cls"""] )
A_ : List[str] = False
if state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : Dict = add_prefix_space
A_ : Any = True
if state.get("""trim_offsets""" ,_a ) != trim_offsets:
A_ : Union[str, Any] = trim_offsets
A_ : List[Any] = True
if changes_to_apply:
A_ : Optional[int] = getattr(_a ,state.pop("""type""" ) )
A_ : Tuple = component_class(**_a )
setattr(self.backend_tokenizer ,_a ,_a )
@property
def _a ( self : List[str] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _a ( self : Union[str, Any] ,_a : Any ):
'''simple docstring'''
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else value
A_ : List[Any] = value
def _a ( self : str ,*_a : str ,**_a : Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*_a ,**_a )
def _a ( self : str ,*_a : List[Any] ,**_a : str ):
'''simple docstring'''
A_ : List[str] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*_a ,**_a )
def _a ( self : Optional[int] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : str = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
def _a ( self : str ,_a : Optional[int] ,_a : int=None ):
'''simple docstring'''
A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Dict = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 665 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = "RegNetConfig"
# Base docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = [1, 1_088, 7, 7]
# Image classification docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = "tabby, tabby cat"
_lowerCAmelCase = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : int , _A : Union[str, Any] , _A : List[Any] , _A : Optional[int] = 3 , _A : str = 1 , _A : str = 1 , _A : Optional[int] = "relu" , ):
super().__init__()
_UpperCamelCase = nn.Convad(
lowerCamelCase_ , lowerCamelCase_ , kernel_size=lowerCamelCase_ , stride=lowerCamelCase_ , padding=kernel_size // 2 , groups=lowerCamelCase_ , bias=lowerCamelCase_ , )
_UpperCamelCase = nn.BatchNormad(lowerCamelCase_ )
_UpperCamelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCamelCase_ ( self : List[str] , _A : Tuple ):
_UpperCamelCase = self.convolution(lowerCamelCase_ )
_UpperCamelCase = self.normalization(lowerCamelCase_ )
_UpperCamelCase = self.activation(lowerCamelCase_ )
return hidden_state
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : Optional[Any] , _A : int ):
super().__init__()
_UpperCamelCase = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
_UpperCamelCase = config.num_channels
def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] ):
_UpperCamelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
_UpperCamelCase = self.embedder(lowerCamelCase_ )
return hidden_state
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : Optional[int] , _A : Optional[Any] , _A : List[str] , _A : int = 2 ):
super().__init__()
_UpperCamelCase = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , stride=lowerCamelCase_ , bias=lowerCamelCase_ )
_UpperCamelCase = nn.BatchNormad(lowerCamelCase_ )
def UpperCamelCase_ ( self : Dict , _A : int ):
_UpperCamelCase = self.convolution(lowerCamelCase_ )
_UpperCamelCase = self.normalization(lowerCamelCase_ )
return hidden_state
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : int , _A : Dict , _A : Optional[Any] ):
super().__init__()
_UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) )
_UpperCamelCase = nn.Sequential(
nn.Convad(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 ) , nn.Sigmoid() , )
def UpperCamelCase_ ( self : str , _A : Optional[int] ):
# b c h w -> b c 1 1
_UpperCamelCase = self.pooler(lowerCamelCase_ )
_UpperCamelCase = self.attention(lowerCamelCase_ )
_UpperCamelCase = hidden_state * attention
return hidden_state
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : Tuple , _A : List[str] , _A : str , _A : Any , _A : List[Any] = 1 ):
super().__init__()
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
RegNetShortCut(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ ) if should_apply_shortcut else nn.Identity()
)
_UpperCamelCase = nn.Sequential(
RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , groups=lowerCamelCase_ , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ ) , )
_UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : str , _A : List[Any] ):
_UpperCamelCase = hidden_state
_UpperCamelCase = self.layer(lowerCamelCase_ )
_UpperCamelCase = self.shortcut(lowerCamelCase_ )
hidden_state += residual
_UpperCamelCase = self.activation(lowerCamelCase_ )
return hidden_state
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : Any , _A : Dict , _A : Optional[int] , _A : Tuple , _A : Optional[int] = 1 ):
super().__init__()
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
RegNetShortCut(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ ) if should_apply_shortcut else nn.Identity()
)
_UpperCamelCase = nn.Sequential(
RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , groups=lowerCamelCase_ , activation=config.hidden_act ) , RegNetSELayer(lowerCamelCase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ ) , )
_UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Any , _A : Union[str, Any] ):
_UpperCamelCase = hidden_state
_UpperCamelCase = self.layer(lowerCamelCase_ )
_UpperCamelCase = self.shortcut(lowerCamelCase_ )
hidden_state += residual
_UpperCamelCase = self.activation(lowerCamelCase_ )
return hidden_state
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : Optional[Any] , _A : Any , _A : Optional[Any] , _A : Optional[int] , _A : Dict = 2 , _A : Union[str, Any] = 2 , ):
super().__init__()
_UpperCamelCase = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
_UpperCamelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , ) , *[layer(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for _ in range(depth - 1 )] , )
def UpperCamelCase_ ( self : Any , _A : int ):
_UpperCamelCase = self.layers(lowerCamelCase_ )
return hidden_state
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : str , _A : Optional[int] ):
super().__init__()
_UpperCamelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowerCamelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCamelCase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , depth=lowerCamelCase_ ) )
def UpperCamelCase_ ( self : List[Any] , _A : str , _A : Any = False , _A : Any = True ):
_UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
_UpperCamelCase = stage_module(lowerCamelCase_ )
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase_ , hidden_states=lowerCamelCase_ )
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase = RegNetConfig
UpperCAmelCase = "regnet"
UpperCAmelCase = "pixel_values"
UpperCAmelCase = True
def UpperCamelCase_ ( self : Optional[Any] , _A : Union[str, Any] ):
if isinstance(lowerCamelCase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(lowerCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def UpperCamelCase_ ( self : str , _A : int , _A : List[str]=False ):
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_UpperCamelCase = value
_lowerCAmelCase = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowerCAmelCase = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top.", a__, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class lowerCAmelCase_ ( a__ ):
def __init__( self : Union[str, Any] , _A : Tuple ):
super().__init__(lowerCamelCase_ )
_UpperCamelCase = config
_UpperCamelCase = RegNetEmbeddings(lowerCamelCase_ )
_UpperCamelCase = RegNetEncoder(lowerCamelCase_ )
_UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Any = None , _A : Dict = None ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.embedder(lowerCamelCase_ )
_UpperCamelCase = self.encoder(
lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ )
_UpperCamelCase = encoder_outputs[0]
_UpperCamelCase = self.pooler(lowerCamelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase_ , pooler_output=lowerCamelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", a__, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class lowerCAmelCase_ ( a__ ):
def __init__( self : Optional[int] , _A : str ):
super().__init__(lowerCamelCase_ )
_UpperCamelCase = config.num_labels
_UpperCamelCase = RegNetModel(lowerCamelCase_ )
# classification head
_UpperCamelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase_ ( self : List[str] , _A : List[Any] = None , _A : Any = None , _A : Any = None , _A : Optional[Any] = None , ):
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.regnet(lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ )
_UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
_UpperCamelCase = self.classifier(lowerCamelCase_ )
_UpperCamelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_UpperCamelCase = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_UpperCamelCase = '''single_label_classification'''
else:
_UpperCamelCase = '''multi_label_classification'''
if self.config.problem_type == "regression":
_UpperCamelCase = MSELoss()
if self.num_labels == 1:
_UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_UpperCamelCase = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
elif self.config.problem_type == "single_label_classification":
_UpperCamelCase = CrossEntropyLoss()
_UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_UpperCamelCase = BCEWithLogitsLoss()
_UpperCamelCase = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
if not return_dict:
_UpperCamelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCamelCase_ , logits=lowerCamelCase_ , hidden_states=outputs.hidden_states )
| 709 | import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = (DPMSolverSDEScheduler,)
UpperCAmelCase = 10
def UpperCamelCase_ ( self : Tuple , **_A : Union[str, Any] ):
_UpperCamelCase = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**_A )
return config
def UpperCamelCase_ ( self : List[Any] ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCamelCase_ ( self : List[Any] ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def UpperCamelCase_ ( self : List[str] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A , use_karras_sigmas=_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
| 71 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' ,'''False''' ) ) is not True ,reason='''Skipping test because should only be run when releasing minor transformers version''' ,)
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self) -> Union[str, Any]:
if self.framework == "pytorch":
subprocess.run(
F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding="""utf-8""" , check=SCREAMING_SNAKE_CASE , )
assert hasattr(self , """env""")
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> int:
_lowerCamelCase : Any = F'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
_lowerCamelCase : Tuple = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=SCREAMING_SNAKE_CASE , instance_count=SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=SCREAMING_SNAKE_CASE , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=SCREAMING_SNAKE_CASE , py_version="""py36""" , )
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Any:
TrainingJobAnalytics(SCREAMING_SNAKE_CASE).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv')
@parameterized.expand([(2,)])
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Dict:
# create estimator
_lowerCamelCase : str = self.create_estimator(SCREAMING_SNAKE_CASE)
# run training
estimator.fit()
# result dataframe
_lowerCamelCase : str = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_lowerCamelCase : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""])
_lowerCamelCase : str = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowerCamelCase : str = (
Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 99_9999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy)
assert all(t <= self.results["""eval_loss"""] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F'{estimator.latest_training_job.name}.json' , """w""") as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , SCREAMING_SNAKE_CASE)
| 88 | '''simple docstring'''
from random import randint, random
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 5 , ):
"""simple docstring"""
__UpperCAmelCase = [[-1] * number_of_cells] # Create a highway without any car
__UpperCAmelCase = 0
__UpperCAmelCase = max(UpperCamelCase__ , 0 )
while i < number_of_cells:
__UpperCAmelCase = (
randint(0 , UpperCamelCase__ ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = highway_now[car_index + 1 :]
for cell in range(len(UpperCamelCase__ ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(UpperCamelCase__ , -1 )
def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : float , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = len(UpperCamelCase__ )
# Beforce calculations, the highway is empty
__UpperCAmelCase = [-1] * number_of_cells
for car_index in range(UpperCamelCase__ ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
__UpperCAmelCase = min(highway_now[car_index] + 1 , UpperCamelCase__ )
# Number of empty cell before the next car
__UpperCAmelCase = get_distance(UpperCamelCase__ , UpperCamelCase__ ) - 1
# We can't have the car causing an accident
__UpperCAmelCase = min(next_highway[car_index] , UpperCamelCase__ )
if random() < probability:
# Randomly, a driver will slow down
__UpperCAmelCase = max(next_highway[car_index] - 1 , 0 )
return next_highway
def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = len(highway[0] )
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = update(highway[i] , UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = [-1] * number_of_cells
for car_index in range(UpperCamelCase__ ):
__UpperCAmelCase = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
__UpperCAmelCase = (car_index + speed) % number_of_cells
# Commit the change of position
__UpperCAmelCase = speed
highway.append(UpperCamelCase__ )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 262 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ) -> Optional[Any]:
debug_launcher(test_script.main )
def lowerCamelCase__( self :int ) -> Dict:
debug_launcher(test_ops.main )
| 709 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
snake_case : Any = logging.get_logger(__name__)
snake_case : Tuple = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class snake_case_ (lowerCamelCase_ ):
def __init__( self :str ,__snake_case :Dict=None ,__snake_case :int=None ,*__snake_case :str ,**__snake_case :Union[str, Any] ) -> Tuple:
super().__init__(*__snake_case ,**__snake_case )
if config is None:
assert isinstance(self.model ,__snake_case ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F' {self.model.__class__}'
)
a__ = self.model.config
else:
a__ = config
a__ = data_args
a__ = self.config.tgt_vocab_size if isinstance(self.config ,__snake_case ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
' padding..' )
if self.args.label_smoothing == 0:
a__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
a__ = label_smoothed_nll_loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :int ) -> Tuple:
if self.optimizer is None:
a__ = ['bias', 'LayerNorm.weight']
a__ = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
a__ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
a__ = Adafactor
a__ = {'scale_parameter': False, 'relative_step': False}
else:
a__ = AdamW
a__ = {
'betas': (self.args.adam_betaa, self.args.adam_betaa),
'eps': self.args.adam_epsilon,
}
a__ = self.args.learning_rate
if self.sharded_ddp:
a__ = OSS(
params=__snake_case ,optim=__snake_case ,**__snake_case ,)
else:
a__ = optimizer_cls(__snake_case ,**__snake_case )
if self.lr_scheduler is None:
a__ = self._get_lr_scheduler(__snake_case )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def lowerCamelCase__( self :Dict ,__snake_case :List[str] ) -> Union[str, Any]:
a__ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
a__ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
a__ = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
a__ = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=__snake_case )
return scheduler
def lowerCamelCase__( self :Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCamelCase__( self :str ,__snake_case :Optional[int] ,__snake_case :List[Any] ,__snake_case :Any ) -> Optional[Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute usual loss via models
a__ , a__ = model(**__snake_case ,labels=__snake_case ,use_cache=__snake_case )[:2]
else:
# compute label smoothed loss
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = torch.nn.functional.log_softmax(__snake_case ,dim=-1 )
a__ , a__ = self.loss_fn(__snake_case ,__snake_case ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :Optional[int] ) -> Any:
a__ = inputs.pop('labels' )
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
return loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :nn.Module ,__snake_case :Dict[str, Union[torch.Tensor, Any]] ,__snake_case :bool ,__snake_case :Optional[List[str]] = None ,) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
a__ = self._prepare_inputs(__snake_case )
a__ = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
a__ = self.model.generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**__snake_case ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
a__ = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
a__ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
a__ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
return (loss, logits, labels)
def lowerCamelCase__( self :List[str] ,__snake_case :Optional[Any] ,__snake_case :Union[str, Any] ) -> int:
# If PAD token is not defined at least EOS token has to be defined
a__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F' padded to `max_length`={max_length}' )
a__ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
a__ = tensor
return padded_tensor
| 657 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : List[Any] = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 46 |
"""simple docstring"""
import re
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
if len(re.findall("[ATCG]" , _lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 46 | 1 |
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
__magic_name__ : Union[str, Any] = ''''''
__magic_name__ : List[Any] = ''''''
__magic_name__ : Union[str, Any] = []
__magic_name__ : List[Any] = 0
__magic_name__ : Optional[int] = 256
__magic_name__ : Optional[Any] = 0
__magic_name__ : Any = 0
__magic_name__ : str = 0
__magic_name__ : Tuple = 0
def _UpperCAmelCase ( self : Tuple , snake_case : List[Any] ) -> Dict:
'''simple docstring'''
__magic_name__ : int = cva.imread(snake_case , 0 )
__magic_name__ : Any = copy.deepcopy(self.img )
__magic_name__ , __magic_name__ , __magic_name__ : str = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
__magic_name__ : Optional[Any] = np.sum(snake_case )
for i in range(len(snake_case ) ):
__magic_name__ : int = x[i] / self.k
self.sk += prk
__magic_name__ : int = (self.L - 1) * self.sk
if self.rem != 0:
__magic_name__ : Union[str, Any] = int(last % last )
__magic_name__ : Optional[Any] = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(snake_case )
__magic_name__ : List[Any] = int(np.ma.count(self.img ) / self.img[1].size )
__magic_name__ : Tuple = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
__magic_name__ : int = self.img[j][i]
if num != self.last_list[num]:
__magic_name__ : List[str] = self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def _UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
A = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
A = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 147 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
A = TypeVar("""T""")
A = TypeVar("""U""")
class _UpperCamelCase ( Generic[T, U] ):
"""simple docstring"""
def __init__( self : Any , snake_case : T | None , snake_case : U | None ) -> Tuple:
'''simple docstring'''
__magic_name__ : Optional[Any] = key
__magic_name__ : str = val
__magic_name__ : DoubleLinkedListNode[T, U] | None = None
__magic_name__ : DoubleLinkedListNode[T, U] | None = None
def __repr__( self : str ) -> str:
'''simple docstring'''
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class _UpperCamelCase ( Generic[T, U] ):
"""simple docstring"""
def __init__( self : str ) -> None:
'''simple docstring'''
__magic_name__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(snake_case , snake_case )
__magic_name__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(snake_case , snake_case )
__magic_name__ , __magic_name__ : Tuple = self.rear, self.head
def __repr__( self : str ) -> str:
'''simple docstring'''
__magic_name__ : List[str] = ['''DoubleLinkedList''']
__magic_name__ : Optional[Any] = self.head
while node.next is not None:
rep.append(str(snake_case ) )
__magic_name__ : Any = node.next
rep.append(str(self.rear ) )
return ",\n ".join(snake_case )
def _UpperCAmelCase ( self : List[str] , snake_case : DoubleLinkedListNode[T, U] ) -> None:
'''simple docstring'''
__magic_name__ : Tuple = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__magic_name__ : Dict = node
__magic_name__ : Optional[int] = previous
__magic_name__ : Tuple = node
__magic_name__ : Optional[int] = self.rear
def _UpperCAmelCase ( self : str , snake_case : DoubleLinkedListNode[T, U] ) -> DoubleLinkedListNode[T, U] | None:
'''simple docstring'''
if node.prev is None or node.next is None:
return None
__magic_name__ : str = node.next
__magic_name__ : Dict = node.prev
__magic_name__ : Any = None
__magic_name__ : Dict = None
return node
class _UpperCamelCase ( Generic[T, U] ):
"""simple docstring"""
snake_case_ = {}
def __init__( self : Dict , snake_case : int ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ : DoubleLinkedList[T, U] = DoubleLinkedList()
__magic_name__ : str = capacity
__magic_name__ : Tuple = 0
__magic_name__ : Optional[Any] = 0
__magic_name__ : List[str] = 0
__magic_name__ : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : Tuple ) -> str:
'''simple docstring'''
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self : List[str] , snake_case : T ) -> bool:
'''simple docstring'''
return key in self.cache
def _UpperCAmelCase ( self : Optional[int] , snake_case : T ) -> U | None:
'''simple docstring'''
if key in self.cache:
self.hits += 1
__magic_name__ : DoubleLinkedListNode[T, U] = self.cache[key]
__magic_name__ : Optional[Any] = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(snake_case )
return node.val
self.miss += 1
return None
def _UpperCAmelCase ( self : str , snake_case : T , snake_case : U ) -> None:
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__magic_name__ : Optional[Any] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(snake_case ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
__magic_name__ : List[str] = DoubleLinkedListNode(snake_case , snake_case )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__magic_name__ : Dict = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__magic_name__ : Any = value
self.list.add(snake_case )
@classmethod
def _UpperCAmelCase ( cls : Tuple , snake_case : int = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
'''simple docstring'''
def cache_decorator_inner(snake_case : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*snake_case : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
__magic_name__ : Any = LRUCache(snake_case )
__magic_name__ : Optional[Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__magic_name__ : Any = func(*snake_case )
cls.decorator_function_to_instance_map[func].put(args[0] , snake_case )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(snake_case , '''cache_info''' , snake_case ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 147 | 1 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( lowerCamelCase__ : str ):
'''simple docstring'''
if not all(x.isalpha() for x in string ):
raise ValueError("""String must only contain alphabetic characters.""" )
A: List[Any] = sorted(string.lower() )
return len(_lowerCamelCase ) == len(set(_lowerCamelCase ) )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] =input('Enter a string ').strip()
__SCREAMING_SNAKE_CASE : Optional[int] =is_isogram(input_str)
print(f"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 135 | '''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : int = 32 , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[int, float] = 1 / 255 , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __lowerCAmelCase : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[str, Any]=7 , __lowerCAmelCase : Optional[int]=30 , __lowerCAmelCase : Tuple=400 , __lowerCAmelCase : List[Any]=3 , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = do_resize
_lowerCAmelCase = size if size is not None else {'shortest_edge': 288}
_lowerCAmelCase = size_divisor
_lowerCAmelCase = do_rescale
_lowerCAmelCase = rescale_factor
_lowerCAmelCase = do_normalize
_lowerCAmelCase = do_center_crop
_lowerCAmelCase = image_mean
_lowerCAmelCase = image_std
_lowerCAmelCase = do_pad
_lowerCAmelCase = batch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = min_resolution
_lowerCAmelCase = max_resolution
def a ( self : str ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def a ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=False ):
"""simple docstring"""
if not batched:
_lowerCAmelCase = self.size['shortest_edge']
_lowerCAmelCase = image_inputs[0]
if isinstance(__lowerCAmelCase , Image.Image ):
_lowerCAmelCase , _lowerCAmelCase = image.size
else:
_lowerCAmelCase , _lowerCAmelCase = image.shape[1], image.shape[2]
_lowerCAmelCase = size / min(__lowerCAmelCase , __lowerCAmelCase )
if h < w:
_lowerCAmelCase , _lowerCAmelCase = size, scale * w
else:
_lowerCAmelCase , _lowerCAmelCase = scale * h, size
_lowerCAmelCase = int((1333 / 800) * size )
if max(__lowerCAmelCase , __lowerCAmelCase ) > max_size:
_lowerCAmelCase = max_size / max(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = newh * scale
_lowerCAmelCase = neww * scale
_lowerCAmelCase , _lowerCAmelCase = int(newh + 0.5 ), int(neww + 0.5 )
_lowerCAmelCase , _lowerCAmelCase = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
_lowerCAmelCase = []
for image in image_inputs:
_lowerCAmelCase , _lowerCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCAmelCase = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0]
_lowerCAmelCase = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( __a , unittest.TestCase ):
"""simple docstring"""
__A = BridgeTowerImageProcessor if is_vision_available() else None
def a ( self : List[str] ):
"""simple docstring"""
_lowerCAmelCase = BridgeTowerImageProcessingTester(self )
@property
def a ( self : Optional[Any] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a ( self : Any ):
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(__lowerCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(__lowerCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(__lowerCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(__lowerCAmelCase , 'size' ) )
self.assertTrue(hasattr(__lowerCAmelCase , 'size_divisor' ) )
def a ( self : Dict ):
"""simple docstring"""
pass
def a ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors='pt' ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a ( self : Tuple ):
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors='pt' ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a ( self : str ):
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors='pt' ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 309 | 0 |
from __future__ import annotations
import pandas as pd
def UpperCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
__A = [0] * no_of_processes
__A = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(__lowercase ):
__A = burst_time[i]
__A = 0
__A = 0
__A = 9_9999_9999
__A = 0
__A = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(__lowercase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
__A = remaining_time[j]
__A = j
__A = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
__A = remaining_time[short]
if minm == 0:
__A = 9_9999_9999
if remaining_time[short] == 0:
complete += 1
__A = False
# Find finish time of current process
__A = increment_time + 1
# Calculate waiting time
__A = finish_time - arrival_time[short]
__A = finar - burst_time[short]
if waiting_time[short] < 0:
__A = 0
# Increment time
increment_time += 1
return waiting_time
def UpperCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
__A = [0] * no_of_processes
for i in range(__lowercase ):
__A = burst_time[i] + waiting_time[i]
return turn_around_time
def UpperCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
__A = 0
__A = 0
for i in range(__lowercase ):
__A = total_waiting_time + waiting_time[i]
__A = total_turn_around_time + turn_around_time[i]
print(F"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
print("Average turn around time =" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
snake_case_ : List[str] =int(input())
snake_case_ : List[str] =[0] * no_of_processes
snake_case_ : Optional[int] =[0] * no_of_processes
snake_case_ : Optional[int] =list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
snake_case_ , snake_case_ : str =map(int, input().split())
snake_case_ : Optional[int] =calculate_waitingtime(arrival_time, burst_time, no_of_processes)
snake_case_ : List[str] =burst_time
snake_case_ : Any =no_of_processes
snake_case_ : Any =waiting_time
snake_case_ : List[str] =calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
snake_case_ : Optional[int] =pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
| 704 |
def UpperCAmelCase ( lowerCAmelCase__ ):
'''simple docstring'''
assert column_title.isupper()
__A = 0
__A = len(lowerCAmelCase__ ) - 1
__A = 0
while index >= 0:
__A = (ord(column_title[index] ) - 64) * pow(26 , lowerCAmelCase__ )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 205 | 0 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def lowerCamelCase_ ( A_ , A_ , A_ , A_ ):
__lowerCamelCase = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ )
__lowerCamelCase , __lowerCamelCase = [i[0] for i in r], [i[1] for i in r]
__lowerCamelCase = list(accumulate(A_ ) )
__lowerCamelCase = bisect(A_ , A_ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316 |
'''simple docstring'''
def lowerCamelCase_ ( A_ = 60_08_51_47_51_43 ):
try:
__lowerCamelCase = int(A_ )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
__lowerCamelCase = 2
__lowerCamelCase = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
__lowerCamelCase = i
while n % i == 0:
__lowerCamelCase = n // i
i += 1
return int(A_ )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 316 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : torch.FloatTensor
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@register_to_config
def __init__( self, A = 16, A = 88, A = None, A = None, A = 1, A = 0.0, A = 32, A = None, A = False, A = None, A = "geglu", A = True, A = True, ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : int = attention_head_dim
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads * attention_head_dim
SCREAMING_SNAKE_CASE : str = in_channels
SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.GroupNorm(num_groups=A, num_channels=A, eps=1E-6, affine=A )
SCREAMING_SNAKE_CASE : Dict = nn.Linear(A, A )
# 3. Define transformers blocks
SCREAMING_SNAKE_CASE : Tuple = nn.ModuleList(
[
BasicTransformerBlock(
A, A, A, dropout=A, cross_attention_dim=A, activation_fn=A, attention_bias=A, double_self_attention=A, norm_elementwise_affine=A, )
for d in range(A )
] )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(A, A )
def UpperCamelCase_ ( self, A, A=None, A=None, A=None, A=1, A=None, A = True, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.shape
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_frames // num_frames
SCREAMING_SNAKE_CASE : Optional[int] = hidden_states
SCREAMING_SNAKE_CASE : Dict = hidden_states[None, :].reshape(A, A, A, A, A )
SCREAMING_SNAKE_CASE : Dict = hidden_states.permute(0, 2, 1, 3, 4 )
SCREAMING_SNAKE_CASE : Tuple = self.norm(A )
SCREAMING_SNAKE_CASE : Dict = hidden_states.permute(0, 3, 4, 2, 1 ).reshape(batch_size * height * width, A, A )
SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(A )
# 2. Blocks
for block in self.transformer_blocks:
SCREAMING_SNAKE_CASE : Dict = block(
A, encoder_hidden_states=A, timestep=A, cross_attention_kwargs=A, class_labels=A, )
# 3. Output
SCREAMING_SNAKE_CASE : int = self.proj_out(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = (
hidden_states[None, None, :]
.reshape(A, A, A, A, A )
.permute(0, 3, 4, 1, 2 )
.contiguous()
)
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.reshape(A, A, A, A )
SCREAMING_SNAKE_CASE : Any = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=A )
| 508 |
'''simple docstring'''
import socket
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = socket.socket(socket.AF_INET ,socket.SOCK_STREAM )
SCREAMING_SNAKE_CASE : Any = socket.gethostname()
SCREAMING_SNAKE_CASE : str = 1_23_12
sock.connect((host, port) )
sock.send(B'Hello server!' )
with open('Received_file' ,'wb' ) as out_file:
print('File opened' )
print('Receiving data...' )
while True:
SCREAMING_SNAKE_CASE : int = sock.recv(10_24 )
if not data:
break
out_file.write(__UpperCamelCase )
print('Successfully received the file' )
sock.close()
print('Connection closed' )
if __name__ == "__main__":
main()
| 508 | 1 |
import math
def A(__a: int , __a: Tuple ):
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(__a )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
lowerCamelCase__ = '''Enter the base and the power separated by a comma: '''
lowerCamelCase__ , lowerCamelCase__ = map(int, input(prompt).split(''','''))
lowerCamelCase__ , lowerCamelCase__ = map(int, input(prompt).split(''','''))
# We find the log of each number, using the function res(), which takes two
# arguments.
lowerCamelCase__ = res(xa, ya)
lowerCamelCase__ = res(xa, ya)
# We check for the largest number
if resa > resa:
print('''Largest number is''', xa, '''^''', ya)
elif resa > resa:
print('''Largest number is''', xa, '''^''', ya)
else:
print('''Both are equal''')
| 122 |
from __future__ import annotations
def A(__a: list[int] , __a: list[int] , __a: int ):
lowerCAmelCase_ = list(range(len(__a ) ) )
lowerCAmelCase_ = [v / w for v, w in zip(__a , __a )]
index.sort(key=lambda __a : ratio[i] , reverse=__a )
lowerCAmelCase_ = 0
lowerCAmelCase_ = [0] * len(__a )
for i in index:
if weight[i] <= capacity:
lowerCAmelCase_ = 1
max_value += value[i]
capacity -= weight[i]
else:
lowerCAmelCase_ = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 122 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class __SCREAMING_SNAKE_CASE ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = XLMProphetNetTokenizer
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
def __lowerCamelCase( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_snake_case : int = XLMProphetNetTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCamelCase( self ):
"""simple docstring"""
_snake_case : Dict = '''[PAD]'''
_snake_case : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase( self ):
"""simple docstring"""
_snake_case : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """[PAD]""" )
self.assertEqual(vocab_keys[1] , """[CLS]""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 10_12 )
def __lowerCamelCase( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_12 )
def __lowerCamelCase( self ):
"""simple docstring"""
_snake_case : Dict = XLMProphetNetTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
_snake_case : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_snake_case : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_snake_case : List[str] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
_snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
@cached_property
def __lowerCamelCase( self ):
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" )
@slow
def __lowerCamelCase( self ):
"""simple docstring"""
_snake_case : List[str] = '''Hello World!'''
_snake_case : int = [3_53_89, 66_72, 49, 2]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def __lowerCamelCase( self ):
"""simple docstring"""
_snake_case : Optional[Any] = {'''input_ids''': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
| 719 |
def UpperCAmelCase ( A__ ) -> Tuple:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def UpperCAmelCase ( A__ ) -> list[tuple[int, int]]:
_snake_case : Union[str, Any] = 0
_snake_case : List[Any] = len(A__ ) # No of vertices in graph
_snake_case : Union[str, Any] = [0] * n
_snake_case : str = [False] * n
def dfs(A__ , A__ , A__ , A__ ):
_snake_case : Union[str, Any] = True
_snake_case : Tuple = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(A__ , A__ , A__ , id_ )
_snake_case : List[Any] = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
_snake_case : Optional[Any] = min(low[at] , low[to] )
_snake_case : list[tuple[int, int]] = []
for i in range(A__ ):
if not visited[i]:
dfs(A__ , -1 , A__ , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 519 | 0 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase ) -> list:
"""simple docstring"""
__UpperCAmelCase : List[Any] = len(UpperCamelCase )
for _ in range(UpperCamelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
A = list(range(10, 0, -1))
print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 77 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int:
"""simple docstring"""
__UpperCAmelCase : Dict = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
__UpperCAmelCase : Union[str, Any] = n - k
# Calculate C(n,k)
for i in range(UpperCamelCase ):
result *= n - i
result //= i + 1
return result
def _UpperCamelCase ( UpperCamelCase ) -> int:
"""simple docstring"""
return binomial_coefficient(2 * node_count , UpperCamelCase ) // (node_count + 1)
def _UpperCamelCase ( UpperCamelCase ) -> int:
"""simple docstring"""
if n < 0:
raise ValueError("factorial() not defined for negative values" )
__UpperCAmelCase : Optional[Any] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def _UpperCamelCase ( UpperCamelCase ) -> int:
"""simple docstring"""
return catalan_number(UpperCamelCase ) * factorial(UpperCamelCase )
if __name__ == "__main__":
A = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 77 | 1 |
import os
import numpy
import onnx
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = a.name
_SCREAMING_SNAKE_CASE = b.name
_SCREAMING_SNAKE_CASE = """"""
_SCREAMING_SNAKE_CASE = """"""
_SCREAMING_SNAKE_CASE = a == b
_SCREAMING_SNAKE_CASE = name_a
_SCREAMING_SNAKE_CASE = name_b
return res
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Optional[Any]:
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(snake_case__ ,snake_case__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g ,snake_case__ ,snake_case__ )
_graph_replace_input_with(node_proto.attribute[1].g ,snake_case__ ,snake_case__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g ,snake_case__ ,snake_case__ )
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Dict:
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(snake_case__ ,snake_case__ ,snake_case__ )
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = list(model.graph.initializer )
_SCREAMING_SNAKE_CASE = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
_SCREAMING_SNAKE_CASE = inits[i].name
_SCREAMING_SNAKE_CASE = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph ,snake_case__ ,snake_case__ )
def __lowerCamelCase ( snake_case__ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = os.path.dirname(snake_case__ )
_SCREAMING_SNAKE_CASE = os.path.basename(snake_case__ )
_SCREAMING_SNAKE_CASE = onnx.load(os.path.join(snake_case__ ,snake_case__ ) )
_SCREAMING_SNAKE_CASE = list(model.graph.initializer )
_SCREAMING_SNAKE_CASE = set()
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = 0
for i in range(len(snake_case__ ) ):
if i in dup_set:
continue
for j in range(i + 1 ,len(snake_case__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] ,inits[j] ):
dup_set.add(snake_case__ )
dup_set.add(snake_case__ )
_SCREAMING_SNAKE_CASE = inits[j].data_type
_SCREAMING_SNAKE_CASE = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ ,snake_case__ )
total_reduced_size += mem_size
_SCREAMING_SNAKE_CASE = inits[i].name
_SCREAMING_SNAKE_CASE = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(snake_case__ )
else:
_SCREAMING_SNAKE_CASE = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ ,total_reduced_size / 10_24 / 10_24 / 10_24 ,"""GB""" )
_SCREAMING_SNAKE_CASE = sorted(snake_case__ )
_remove_dup_initializers_from_model(snake_case__ ,snake_case__ ,snake_case__ )
_SCREAMING_SNAKE_CASE = """optimized_""" + model_file_name
_SCREAMING_SNAKE_CASE = os.path.join(snake_case__ ,snake_case__ )
onnx.save(snake_case__ ,snake_case__ )
return new_model
| 720 |
import numpy as np
def __lowerCamelCase ( snake_case__ ) -> np.array:
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 569 | 0 |
import os
import time
import numpy as np
import onnxruntime as ort
lowercase_ = '1'
lowercase_ = '0'
lowercase_ = '1'
lowercase_ = ort.SessionOptions()
lowercase_ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
lowercase_ = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
lowercase_ = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
lowercase_ = ort.RunOptions()
lowercase_ = 1_28
lowercase_ = 1
lowercase_ = np.ones((batch, sequence), dtype=np.intaa)
lowercase_ = np.ones((batch, sequence), dtype=np.intaa)
lowercase_ = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
lowercase_ = time.time()
lowercase_ = 20_00
lowercase_ = {}
for iter in range(max_iters):
lowercase_ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 10_00 / max_iters))
| 354 |
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , lowercase__ : Any="" , lowercase__ : List[str]="train" ):
assert os.path.isdir(lowercase__ )
__lowercase : Optional[Any] = []
__lowercase : Optional[int] = os.listdir(lowercase__ )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
__lowercase : Optional[Any] = os.path.join(lowercase__ , lowercase__ )
if not os.path.isfile(lowercase__ ):
continue
self.documents.append(lowercase__ )
def __len__( self : Union[str, Any] ):
return len(self.documents )
def __getitem__( self : int , lowercase__ : int ):
__lowercase : List[str] = self.documents[idx]
__lowercase : List[str] = document_path.split("/" )[-1]
with open(lowercase__ , encoding="utf-8" ) as source:
__lowercase : Optional[int] = source.read()
__lowercase ,__lowercase : Union[str, Any] = process_story(lowercase__ )
return document_name, story_lines, summary_lines
def snake_case__ ( _lowerCamelCase ) ->str:
"""simple docstring"""
__lowercase : int = list(filter(lambda _lowerCamelCase : len(_lowerCamelCase ) != 0, [line.strip() for line in raw_story.split("\n" )] ) )
# for some unknown reason some lines miss a period, add it
__lowercase : int = [_add_missing_period(_lowerCamelCase ) for line in nonempty_lines]
# gather article lines
__lowercase : str = []
__lowercase : Union[str, Any] = deque(_lowerCamelCase )
while True:
try:
__lowercase : Any = lines.popleft()
if element.startswith("@highlight" ):
break
story_lines.append(_lowerCamelCase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
__lowercase : str = list(filter(lambda _lowerCamelCase : not t.startswith("@highlight" ), _lowerCamelCase ) )
return story_lines, summary_lines
def snake_case__ ( _lowerCamelCase ) ->Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"]
if line.startswith("@highlight" ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->int:
"""simple docstring"""
if len(_lowerCamelCase ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(_lowerCamelCase )) )
return sequence
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = torch.ones_like(_lowerCamelCase )
__lowercase : Any = sequence == pad_token_id
__lowercase : List[str] = 0
return mask
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->List[str]:
"""simple docstring"""
__lowercase : Optional[int] = [tokenizer.encode(_lowerCamelCase ) for line in story_lines]
__lowercase : Optional[int] = [token for sentence in story_lines_token_ids for token in sentence]
__lowercase : Any = [tokenizer.encode(_lowerCamelCase ) for line in summary_lines]
__lowercase : Tuple = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->Optional[int]:
"""simple docstring"""
__lowercase : List[str] = []
for sequence in batch:
__lowercase : Dict = -1
__lowercase : str = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(_lowerCamelCase )
return torch.tensor(_lowerCamelCase )
| 575 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCAmelCase = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_lowerCAmelCase = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_lowerCAmelCase = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
def snake_case_ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def snake_case_ ( self , A__ , A__ , A__=None , A__=None , A__=None , A__=None , A__="auto" , A__=-1 , A__=0.9 , A__=5 , A__=500 , A__="gpt2-large" , A__=-1 , A__=1024 , A__=25 , A__=5 , A__=True , A__=25 , ):
"""simple docstring"""
UpperCAmelCase_: Tuple = compute_mauve(
p_text=A__ , q_text=A__ , p_features=A__ , q_features=A__ , p_tokens=A__ , q_tokens=A__ , num_buckets=A__ , pca_max_data=A__ , kmeans_explained_var=A__ , kmeans_num_redo=A__ , kmeans_max_iter=A__ , featurize_model_name=A__ , device_id=A__ , max_text_length=A__ , divergence_curve_discretization_size=A__ , mauve_scaling_factor=A__ , verbose=A__ , seed=A__ , )
return out | 717 |
import os
def lowercase ( _a = "matrix.txt" ) -> int:
with open(os.path.join(os.path.dirname(_a ) ,_a ) ) as in_file:
UpperCAmelCase_: str = in_file.read()
UpperCAmelCase_: Union[str, Any] = [[int(_a ) for cell in row.split("," )] for row in data.strip().splitlines()]
UpperCAmelCase_: List[Any] = [[0 for cell in row] for row in grid]
UpperCAmelCase_: Any = len(grid[0] )
UpperCAmelCase_: int = [[0 for i in range(_a )] for j in range(_a )]
UpperCAmelCase_: int = grid[0][0]
for i in range(1 ,_a ):
UpperCAmelCase_: List[Any] = grid[0][i] + dp[0][i - 1]
for i in range(1 ,_a ):
UpperCAmelCase_: Any = grid[i][0] + dp[i - 1][0]
for i in range(1 ,_a ):
for j in range(1 ,_a ):
UpperCAmelCase_: Union[str, Any] = grid[i][j] + min(dp[i - 1][j] ,dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F"""{solution() = }""") | 306 | 0 |
"""simple docstring"""
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
_enforce_args(a__ , a__ )
if n == 0:
return 0
lowerCAmelCase :Any = float('-inf' )
for i in range(1 , n + 1 ):
lowerCAmelCase :Dict = max(
a__ , prices[i - 1] + naive_cut_rod_recursive(n - i , a__ ) )
return max_revue
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
_enforce_args(a__ , a__ )
lowerCAmelCase :Dict = [float('-inf' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(a__ , a__ , a__ )
def UpperCAmelCase ( a__ , a__ , a__ ):
'''simple docstring'''
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
lowerCAmelCase :Optional[int] = float('-inf' )
for i in range(1 , n + 1 ):
lowerCAmelCase :Any = max(
a__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , a__ , a__ ) , )
lowerCAmelCase :Dict = max_revenue
return max_rev[n]
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
_enforce_args(a__ , a__ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
lowerCAmelCase :Optional[Any] = [float('-inf' ) for _ in range(n + 1 )]
lowerCAmelCase :List[Any] = 0
for i in range(1 , n + 1 ):
lowerCAmelCase :List[str] = max_rev[i]
for j in range(1 , i + 1 ):
lowerCAmelCase :List[Any] = max(a__ , prices[j - 1] + max_rev[i - j] )
lowerCAmelCase :List[str] = max_revenue_i
return max_rev[n]
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
if n < 0:
lowerCAmelCase :Optional[int] = F"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(a__ )
if n > len(a__ ):
lowerCAmelCase :Tuple = (
'Each integral piece of rod must have a corresponding price. '
F"""Got n = {n} but length of prices = {len(a__ )}"""
)
raise ValueError(a__ )
def UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase :List[str] = [6, 10, 12, 15, 20, 23]
lowerCAmelCase :Optional[Any] = len(a__ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
lowerCAmelCase :Union[str, Any] = 36
lowerCAmelCase :Any = top_down_cut_rod(a__ , a__ )
lowerCAmelCase :int = bottom_up_cut_rod(a__ , a__ )
lowerCAmelCase :Optional[Any] = naive_cut_rod_recursive(a__ , a__ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main() | 553 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class __UpperCamelCase :
lowercase_ : int
lowercase_ : TreeNode | None = None
lowercase_ : TreeNode | None = None
__SCREAMING_SNAKE_CASE = namedtuple('CoinsDistribResult', 'moves excess')
def UpperCAmelCase ( a__ ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(a__ ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(a__ ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(a__ ) != count_coins(a__ ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
def get_distrib(a__ ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
lowerCAmelCase , lowerCAmelCase :Optional[int] = get_distrib(node.left )
lowerCAmelCase , lowerCAmelCase :str = get_distrib(node.right )
lowerCAmelCase :str = 1 - left_distrib_excess
lowerCAmelCase :Optional[Any] = 1 - right_distrib_excess
lowerCAmelCase :Any = (
left_distrib_moves
+ right_distrib_moves
+ abs(a__ )
+ abs(a__ )
)
lowerCAmelCase :Tuple = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(a__ , a__ )
return get_distrib(a__ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod() | 553 | 1 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def UpperCamelCase__ ( ):
raise RuntimeError('CUDA out of memory.' )
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self: str ):
super().__init__()
__lowerCamelCase : List[str] = nn.Linear(3 , 4 )
__lowerCamelCase : List[str] = nn.BatchNormad(4 )
__lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 )
def _snake_case ( self: str , a: str ):
return self.lineara(self.batchnorm(self.lineara(a ) ) )
class A_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : List[str] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(a: str ):
nonlocal batch_sizes
batch_sizes.append(a )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(a , [128, 64, 32, 16, 8] )
def _snake_case ( self: List[Any] ):
__lowerCamelCase : Optional[int] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(a: Optional[int] , a: Any ):
nonlocal batch_sizes
batch_sizes.append(a )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__lowerCamelCase , __lowerCamelCase : Dict = mock_training_loop_function('hello' )
self.assertListEqual(a , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def _snake_case ( self: Tuple ):
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(a: Dict ):
pass
with self.assertRaises(a ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def _snake_case ( self: Dict ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(a: int ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(a ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def _snake_case ( self: str ):
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(a: List[str] , a: Dict , a: Dict ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(a ) as cm:
mock_training_loop_function(128 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def _snake_case ( self: Dict ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(a: Dict ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(a ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def _snake_case ( self: Tuple ):
__lowerCamelCase : Any = torch.cuda.memory_allocated()
__lowerCamelCase : Tuple = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , a )
__lowerCamelCase : Union[str, Any] = release_memory(a )
self.assertEqual(torch.cuda.memory_allocated() , a )
| 230 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = '▁'
lowercase_ = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = BigBirdTokenizer
__snake_case = BigBirdTokenizerFast
__snake_case = True
__snake_case = True
def _snake_case ( self: int ):
super().setUp()
__lowerCamelCase : str = self.tokenizer_class(a , keep_accents=a )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self: str ):
__lowerCamelCase : int = '<s>'
__lowerCamelCase : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def _snake_case ( self: Tuple ):
__lowerCamelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '[MASK]' )
self.assertEqual(len(a ) , 1004 )
def _snake_case ( self: Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _snake_case ( self: Dict ):
if not self.test_rust_tokenizer:
return
__lowerCamelCase : List[str] = self.get_tokenizer()
__lowerCamelCase : Union[str, Any] = self.get_rust_tokenizer()
__lowerCamelCase : str = 'I was born in 92000, and this is falsé.'
__lowerCamelCase : List[Any] = tokenizer.tokenize(a )
__lowerCamelCase : List[Any] = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
__lowerCamelCase : str = tokenizer.encode(a , add_special_tokens=a )
__lowerCamelCase : str = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
__lowerCamelCase : Tuple = tokenizer.encode(a )
__lowerCamelCase : str = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Tuple = BigBirdTokenizer(a , keep_accents=a )
__lowerCamelCase : Union[str, Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [285, 46, 10, 170, 382] , )
__lowerCamelCase : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__lowerCamelCase : int = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__lowerCamelCase : Optional[int] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def _snake_case ( self: Dict ):
return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
@slow
def _snake_case ( self: List[Any] ):
__lowerCamelCase : Tuple = 'Hello World!'
__lowerCamelCase : Optional[int] = [65, 1_8536, 2260, 101, 66]
self.assertListEqual(a , self.big_tokenizer.encode(a ) )
@slow
def _snake_case ( self: Dict ):
__lowerCamelCase : List[str] = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
# fmt: off
__lowerCamelCase : int = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(a , self.big_tokenizer.encode(a ) )
@require_torch
@slow
def _snake_case ( self: List[Any] ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__lowerCamelCase : Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
__lowerCamelCase : int = ' '.join(a )
__lowerCamelCase : List[str] = self.big_tokenizer.encode_plus(a , return_tensors='pt' , return_token_type_ids=a )
__lowerCamelCase : Any = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=a )
__lowerCamelCase : Optional[int] = BigBirdConfig(attention_type='original_full' )
__lowerCamelCase : Any = BigBirdModel(a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a )
model(**a )
@slow
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase : Optional[int] = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
__lowerCamelCase : Union[str, Any] = tokenizer.decode(tokenizer('Paris is the [MASK].' ).input_ids )
self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]' )
@slow
def _snake_case ( self: List[Any] ):
# fmt: off
__lowerCamelCase : Optional[Any] = {'input_ids': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
| 230 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""microsoft/unispeech-sat-base-100h-libri-ft""": (
"""https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"""
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : Union[str, Any] = "unispeech-sat"
def __init__( self : Tuple , __lowerCAmelCase : Tuple=32 , __lowerCAmelCase : List[Any]=7_68 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : Dict=30_72 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Any=1E-5 , __lowerCAmelCase : List[Any]="group" , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : List[Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __lowerCAmelCase : Any=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : int=(10, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : int=1_28 , __lowerCAmelCase : Tuple=16 , __lowerCAmelCase : int=False , __lowerCAmelCase : int=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : str=10 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Any=10 , __lowerCAmelCase : str=0 , __lowerCAmelCase : List[str]=3_20 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Dict=1_00 , __lowerCAmelCase : Optional[int]=2_56 , __lowerCAmelCase : List[Any]=2_56 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[Any]="mean" , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : Union[str, Any]=2_56 , __lowerCAmelCase : int=(5_12, 5_12, 5_12, 5_12, 15_00) , __lowerCAmelCase : Optional[Any]=(5, 3, 3, 1, 1) , __lowerCAmelCase : str=(1, 2, 3, 1, 1) , __lowerCAmelCase : Union[str, Any]=5_12 , __lowerCAmelCase : List[str]=0 , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Dict=5_04 , **__lowerCAmelCase : Optional[Any] , ) -> int:
super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
_A = hidden_size
_A = feat_extract_norm
_A = feat_extract_activation
_A = list(__lowerCAmelCase )
_A = list(__lowerCAmelCase )
_A = list(__lowerCAmelCase )
_A = conv_bias
_A = num_conv_pos_embeddings
_A = num_conv_pos_embedding_groups
_A = len(self.conv_dim )
_A = num_hidden_layers
_A = intermediate_size
_A = hidden_act
_A = num_attention_heads
_A = hidden_dropout
_A = attention_dropout
_A = activation_dropout
_A = feat_proj_dropout
_A = final_dropout
_A = layerdrop
_A = layer_norm_eps
_A = initializer_range
_A = vocab_size
_A = num_clusters
_A = do_stable_layer_norm
_A = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_A = apply_spec_augment
_A = mask_time_prob
_A = mask_time_length
_A = mask_time_min_masks
_A = mask_feature_prob
_A = mask_feature_length
_A = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_A = num_codevectors_per_group
_A = num_codevector_groups
_A = contrastive_logits_temperature
_A = feat_quantizer_dropout
_A = num_negatives
_A = codevector_dim
_A = proj_codevector_dim
_A = diversity_loss_weight
# ctc loss
_A = ctc_loss_reduction
_A = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_A = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_A = list(__lowerCAmelCase )
_A = list(__lowerCAmelCase )
_A = list(__lowerCAmelCase )
_A = xvector_output_dim
@property
def snake_case_ ( self : Tuple ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 2 |
'''simple docstring'''
from typing import List
import numpy as np
def _A ( snake_case ) -> int:
_lowercase : Optional[int] = {key: len(snake_case ) for key, value in gen_kwargs.items() if isinstance(snake_case , snake_case )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
) )
_lowercase : int = max(lists_lengths.values() , default=0 )
return max(1 , snake_case )
def _A ( snake_case , snake_case ) -> List[range]:
_lowercase : int = []
for group_idx in range(snake_case ):
_lowercase : Optional[Any] = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_lowercase : str = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_lowercase : Optional[Any] = range(snake_case , start + num_shards_to_add )
shards_indices_per_group.append(snake_case )
return shards_indices_per_group
def _A ( snake_case , snake_case ) -> List[dict]:
_lowercase : Optional[Any] = _number_of_shards_in_gen_kwargs(snake_case )
if num_shards == 1:
return [dict(snake_case )]
else:
_lowercase : Any = _distribute_shards(num_shards=snake_case , max_num_jobs=snake_case )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(snake_case , snake_case )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(snake_case ) )
]
def _A ( snake_case ) -> dict:
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , snake_case )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _A ( snake_case , snake_case ) -> dict:
_lowercase : Any = {len(snake_case ) for value in gen_kwargs.values() if isinstance(snake_case , snake_case )}
_lowercase : Optional[int] = {}
for size in list_sizes:
_lowercase : Optional[Any] = list(range(snake_case ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_lowercase : Dict = dict(snake_case )
for key, value in shuffled_kwargs.items():
if isinstance(snake_case , snake_case ):
_lowercase : Tuple = [value[i] for i in indices_per_size[len(snake_case )]]
return shuffled_kwargs
| 245 | 0 |
def lowerCAmelCase_ ( __a = 10 ) -> str:
"""simple docstring"""
if not isinstance(__a , __a ) or n < 0:
raise ValueError("Invalid input" )
lowerCamelCase__: Tuple =10**n
lowerCamelCase__: Optional[int] =28433 * (pow(2 , 7830457 , __a )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'{solution(10) = }')
| 437 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = TextToVideoSDPipeline
lowercase_ = TEXT_TO_IMAGE_PARAMS
lowercase_ = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowercase_ = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]:
'''simple docstring'''
torch.manual_seed(0)
lowerCamelCase__: Optional[int] =UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
lowerCamelCase__: Union[str, Any] =DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0)
lowerCamelCase__: List[str] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
lowerCamelCase__: Optional[int] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase__: Optional[Any] =CLIPTextModel(UpperCAmelCase_)
lowerCamelCase__: Dict =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
lowerCamelCase__: Tuple ={
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any]=0) ->Union[str, Any]:
'''simple docstring'''
if str(UpperCAmelCase_).startswith("mps"):
lowerCamelCase__: Optional[int] =torch.manual_seed(UpperCAmelCase_)
else:
lowerCamelCase__: Any =torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
lowerCamelCase__: List[Any] ={
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[str] ="cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__: Optional[Any] =self.get_dummy_components()
lowerCamelCase__: List[Any] =TextToVideoSDPipeline(**UpperCAmelCase_)
lowerCamelCase__: int =sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: Any =self.get_dummy_inputs(UpperCAmelCase_)
lowerCamelCase__: List[Any] ="np"
lowerCamelCase__: Optional[int] =sd_pipe(**UpperCAmelCase_).frames
lowerCamelCase__: Dict =frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
lowerCamelCase__: Optional[int] =np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->str:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=3E-3)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->List[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=1E-2)
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->str:
'''simple docstring'''
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Any:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int:
'''simple docstring'''
lowerCamelCase__: Dict =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
lowerCamelCase__: Optional[int] =TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
lowerCamelCase__: str =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCamelCase__: Tuple =pipe.to("cuda")
lowerCamelCase__: List[Any] ="Spiderman is surfing"
lowerCamelCase__: Dict =torch.Generator(device="cpu").manual_seed(0)
lowerCamelCase__: Optional[int] =pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=25 , output_type="pt").frames
lowerCamelCase__: str =video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5E-2
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
lowerCamelCase__: List[str] =TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
lowerCamelCase__: Any =pipe.to("cuda")
lowerCamelCase__: Dict ="Spiderman is surfing"
lowerCamelCase__: Dict =torch.Generator(device="cpu").manual_seed(0)
lowerCamelCase__: Optional[int] =pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type="pt").frames
lowerCamelCase__: List[Any] =video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5E-2
| 437 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : UNetaDModel
__lowerCAmelCase : KarrasVeScheduler
def __init__( self ,__snake_case ,__snake_case ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=__snake_case ,scheduler=__snake_case )
@torch.no_grad()
def __call__( self ,__snake_case = 1 ,__snake_case = 5_0 ,__snake_case = None ,__snake_case = "pil" ,__snake_case = True ,**__snake_case ,):
"""simple docstring"""
A_ = self.unet.config.sample_size
A_ = (batch_size, 3, img_size, img_size)
A_ = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
A_ = randn_tensor(__snake_case ,generator=__snake_case ,device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__snake_case )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
A_ = self.scheduler.schedule[t]
A_ = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
A_ , A_ = self.scheduler.add_noise_to_input(__snake_case ,__snake_case ,generator=__snake_case )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
A_ = (sigma_hat / 2) * model((sample_hat + 1) / 2 ,sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
A_ = self.scheduler.step(__snake_case ,__snake_case ,__snake_case ,__snake_case )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
A_ = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 ,sigma_prev / 2 ).sample
A_ = self.scheduler.step_correct(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,step_output.prev_sample ,step_output['''derivative'''] ,)
A_ = step_output.prev_sample
A_ = (sample / 2 + 0.5).clamp(0 ,1 )
A_ = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
A_ = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
| 188 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
A_ = HfArgumentParser(_UpperCAmelCase )
A_ = parser.parse_args_into_dataclasses()[0]
A_ = TensorFlowBenchmark(args=_UpperCAmelCase )
try:
A_ = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
A_ = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
A_ = ''' '''.join(str(_UpperCAmelCase ).split(''' ''' )[:-1] )
A_ = ''''''
A_ = eval(str(_UpperCAmelCase ).split(''' ''' )[-1] )
A_ = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
A_ = full_error_msg + begin_error_msg + str(_UpperCAmelCase )
raise ValueError(_UpperCAmelCase )
benchmark.run()
if __name__ == "__main__":
main()
| 188 | 1 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class A_ :
'''simple docstring'''
def __init__( self , snake_case = "cpu" , snake_case = "openai/clip-vit-large-patch14" ):
lowercase = device
lowercase = CLIPTokenizerFast.from_pretrained(snake_case )
lowercase = [0.48_145_466, 0.4_578_275, 0.40_821_073]
lowercase = [0.26_862_954, 0.26_130_258, 0.27_577_711]
lowercase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
lowercase = torchvision.transforms.Resize(224 )
lowercase = torchvision.transforms.CenterCrop(224 )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = self.resize(snake_case )
lowercase = self.center_crop(snake_case )
lowercase = self.normalize(snake_case )
return images
def __call__( self , snake_case=None , snake_case=None , **snake_case ):
lowercase = self.tokenizer(text=snake_case , **snake_case )
lowercase = self.preprocess_img(snake_case )
lowercase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case=10 , snake_case=0.01 , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=False , snake_case=True , snake_case="image" , snake_case=True , snake_case=False , snake_case=False , snake_case=False , ):
super().__init__()
lowercase = None
lowercase = device if device else get_device()
if vqgan:
lowercase = vqgan
else:
lowercase = load_vqgan(self.device , conf_path=snake_case , ckpt_path=snake_case )
self.vqgan.eval()
if clip:
lowercase = clip
else:
lowercase = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
lowercase = ProcessorGradientFlow(device=self.device )
lowercase = iterations
lowercase = lr
lowercase = log
lowercase = make_grid
lowercase = return_val
lowercase = quantize
lowercase = self.vqgan.decoder.z_shape
def SCREAMING_SNAKE_CASE__ ( self , snake_case=None , snake_case=None , snake_case=5 , snake_case=True ):
lowercase = []
if output_path is None:
lowercase = './animation.gif'
if input_path is None:
lowercase = self.save_path
lowercase = sorted(glob(input_path + '/*' ) )
if not len(snake_case ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(snake_case ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
lowercase = total_duration / len(snake_case )
lowercase = [frame_duration] * len(snake_case )
if extend_frames:
lowercase = 1.5
lowercase = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(snake_case ) )
imageio.mimsave(snake_case , snake_case , duration=snake_case )
print(F'''gif saved to {output_path}''' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case=None , snake_case=None ):
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
lowercase = preprocess(Image.open(snake_case ) , target_image_size=256 ).to(self.device )
lowercase = preprocess_vqgan(snake_case )
lowercase = self.vqgan.encode(snake_case )
return z
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = self.latent.detach().requires_grad_()
lowercase = base_latent + transform_vector
if self.quantize:
lowercase = self.vqgan.quantize(snake_case )
else:
lowercase = trans_latent
return self.vqgan.decode(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case=None ):
lowercase = self.clip_preprocessor(text=snake_case , images=snake_case , return_tensors='pt' , padding=snake_case )
lowercase = self.clip(**snake_case )
lowercase = clip_outputs.logits_per_image
if weights is not None:
lowercase = similarity_logits * weights
return similarity_logits.sum()
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
lowercase = self._get_clip_similarity(pos_prompts['prompts'] , snake_case , weights=(1 / pos_prompts['weights']) )
if neg_prompts:
lowercase = self._get_clip_similarity(neg_prompts['prompts'] , snake_case , weights=neg_prompts['weights'] )
else:
lowercase = torch.tensor([1] , device=self.device )
lowercase = -torch.log(snake_case ) + torch.log(snake_case )
return loss
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
lowercase = torch.randn_like(self.latent , requires_grad=snake_case , device=self.device )
lowercase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
lowercase = self._add_vector(snake_case )
lowercase = loop_post_process(snake_case )
lowercase = self._get_CLIP_loss(snake_case , snake_case , snake_case )
print('CLIP loss' , snake_case )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=snake_case )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
wandb.init(reinit=snake_case , project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
lowercase = Image.open(snake_case )
lowercase = image.resize((256, 256) )
wandb.log('Original Image' , wandb.Image(snake_case ) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if not prompts:
return []
lowercase = []
lowercase = []
if isinstance(snake_case , snake_case ):
lowercase = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(snake_case , (tuple, list) ):
lowercase = prompt[0]
lowercase = float(prompt[1] )
elif ":" in prompt:
lowercase = prompt.split(':' )
lowercase = float(snake_case )
else:
lowercase = prompt
lowercase = 1.0
processed_prompts.append(snake_case )
weights.append(snake_case )
return {
"prompts": processed_prompts,
"weights": torch.tensor(snake_case , device=self.device ),
}
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=None , snake_case=None , snake_case=True , snake_case=False , snake_case=True , snake_case=True , snake_case=None , ):
if image_path:
lowercase = self._get_latent(snake_case )
else:
lowercase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(snake_case , snake_case , snake_case )
assert pos_prompts, "You must provide at least one positive prompt."
lowercase = self.process_prompts(snake_case )
lowercase = self.process_prompts(snake_case )
if save_final and save_path is None:
lowercase = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(snake_case ):
os.makedirs(snake_case )
else:
lowercase = save_path + '_' + get_timestamp()
os.makedirs(snake_case )
lowercase = save_path
lowercase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(snake_case ) )
lowercase = loop_post_process(snake_case )
for iter, transformed_img in enumerate(self._optimize_CLIP(snake_case , snake_case , snake_case ) ):
if show_intermediate:
show_pil(snake_case )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({'Image': wandb.Image(snake_case )} )
if show_final:
show_pil(snake_case )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png''' ) )
| 703 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
if "model" in orig_key:
lowercase = orig_key.replace('model.' , '' )
if "norm1" in orig_key:
lowercase = orig_key.replace('norm1' , 'attention.output.LayerNorm' )
if "norm2" in orig_key:
lowercase = orig_key.replace('norm2' , 'output.LayerNorm' )
if "norm" in orig_key:
lowercase = orig_key.replace('norm' , 'LayerNorm' )
if "transformer" in orig_key:
lowercase = orig_key.split('.' )[0].split('_' )[-1]
lowercase = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' )
if "mha.attn" in orig_key:
lowercase = orig_key.replace('mha.attn' , 'attention.self' )
if "mha" in orig_key:
lowercase = orig_key.replace('mha' , 'attention' )
if "W_q" in orig_key:
lowercase = orig_key.replace('W_q' , 'self.query' )
if "W_k" in orig_key:
lowercase = orig_key.replace('W_k' , 'self.key' )
if "W_v" in orig_key:
lowercase = orig_key.replace('W_v' , 'self.value' )
if "ff1" in orig_key:
lowercase = orig_key.replace('ff1' , 'intermediate.dense' )
if "ff2" in orig_key:
lowercase = orig_key.replace('ff2' , 'output.dense' )
if "ff" in orig_key:
lowercase = orig_key.replace('ff' , 'output.dense' )
if "mlm_class" in orig_key:
lowercase = orig_key.replace('mlm.mlm_class' , 'cls.predictions.decoder' )
if "mlm" in orig_key:
lowercase = orig_key.replace('mlm' , 'cls.predictions.transform' )
if "cls" not in orig_key:
lowercase = 'yoso.' + orig_key
return orig_key
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(__SCREAMING_SNAKE_CASE )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
lowercase = val
lowercase = orig_state_dict['cls.predictions.decoder.bias']
lowercase = torch.arange(__SCREAMING_SNAKE_CASE ).expand((1, -1) ) + 2
return orig_state_dict
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = torch.load(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model_state_dict']
lowercase = YosoConfig.from_json_file(__SCREAMING_SNAKE_CASE )
lowercase = YosoForMaskedLM(__SCREAMING_SNAKE_CASE )
lowercase = convert_checkpoint_helper(config.max_position_embeddings , __SCREAMING_SNAKE_CASE )
print(model.load_state_dict(__SCREAMING_SNAKE_CASE ) )
model.eval()
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 565 | 0 |
from datetime import datetime
import requests
def A ( lowercase__ : str ) -> bytes:
UpperCamelCase__ :Any = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
UpperCamelCase__ :Optional[int] = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
return requests.get(lowercase__ ).content
if __name__ == "__main__":
UpperCamelCase = input("Enter Video/IGTV url: ").strip()
UpperCamelCase = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''') | 45 |
from __future__ import annotations
def A ( lowercase__ : list[int] ) -> bool:
return len(set(lowercase__ ) ) == len(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod() | 45 | 1 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
debug_launcher(test_script.main )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
debug_launcher(test_ops.main )
| 709 |
def __lowerCAmelCase ( snake_case : int = 100 ) -> int:
__lowerCamelCase: List[Any] = 0
__lowerCamelCase: int = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 189 | 0 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase (__A):
"""simple docstring"""
return [ord(__A) - 96 for elem in plain]
def lowerCAmelCase (__A):
"""simple docstring"""
return "".join(chr(elem + 96) for elem in encoded)
def lowerCAmelCase ():
"""simple docstring"""
_a = encode(input('''-> ''').strip().lower())
print('''Encoded: ''' , __A)
print('''Decoded:''' , decode(__A))
if __name__ == "__main__":
main()
| 11 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = IFInpaintingPipeline
lowerCamelCase__ : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
lowerCamelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCamelCase__ : Tuple = PipelineTesterMixin.required_optional_params - {"""latents"""}
def lowercase_ ( self ):
'''simple docstring'''
return self._get_dummy_components()
def lowercase_ ( self , A_ , A_=0 ):
'''simple docstring'''
if str(A_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(A_ )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=A_ ).manual_seed(A_ )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
SCREAMING_SNAKE_CASE__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowercase_ ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowercase_ ( self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def lowercase_ ( self ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowercase_ ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowercase_ ( self ):
'''simple docstring'''
self._test_save_load_local()
def lowercase_ ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 100 | 0 |
"""simple docstring"""
import numpy as np
def UpperCamelCase ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float = 1E-12 , _lowerCAmelCase : int = 100 , ):
assert np.shape(_lowerCAmelCase )[0] == np.shape(_lowerCAmelCase )[1]
# Ensure proper dimensionality.
assert np.shape(_lowerCAmelCase )[0] == np.shape(_lowerCAmelCase )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(_lowerCAmelCase ) == np.iscomplexobj(_lowerCAmelCase )
__a = np.iscomplexobj(_lowerCAmelCase )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(_lowerCAmelCase , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
__a = False
__a = 0
__a = 0
__a = 1E12
while not convergence:
# Multiple matrix by the vector.
__a = np.dot(_lowerCAmelCase , _lowerCAmelCase )
# Normalize the resulting output vector.
__a = w / np.linalg.norm(_lowerCAmelCase )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
__a = vector.conj().T if is_complex else vector.T
__a = np.dot(_lowerCAmelCase , np.dot(_lowerCAmelCase , _lowerCAmelCase ) )
# Check convergence.
__a = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
__a = True
__a = lambda_
if is_complex:
__a = np.real(lambda_ )
return lambda_, vector
def UpperCamelCase ( ):
__a = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
__a = np.array([41, 4, 20] )
__a = real_input_matrix.astype(np.complexaaa )
__a = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
__a = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
__a = real_input_matrix
__a = real_vector
elif problem_type == "complex":
__a = complex_input_matrix
__a = complex_vector
# Our implementation.
__a , __a = power_iteration(_lowerCAmelCase , _lowerCAmelCase )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
__a , __a = np.linalg.eigh(_lowerCAmelCase )
# Last eigenvalue is the maximum one.
__a = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
__a = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(_lowerCAmelCase ) - np.abs(_lowerCAmelCase ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 173 | """simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__A = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
__A = {
"""169M""": 7_68,
"""430M""": 10_24,
"""1B5""": 20_48,
"""3B""": 25_60,
"""7B""": 40_96,
"""14B""": 51_20,
}
def UpperCamelCase ( _lowerCAmelCase : Tuple ):
__a = list(state_dict.keys() )
for name in state_dict_keys:
__a = state_dict.pop(_lowerCAmelCase )
# emb -> embedding
if name.startswith("""emb.""" ):
__a = name.replace("""emb.""" , """embeddings.""" )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("""blocks.0.ln0""" ):
__a = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" )
# att -> attention
__a = re.sub(r"""blocks\.(\d+)\.att""" , r"""blocks.\1.attention""" , _lowerCAmelCase )
# ffn -> feed_forward
__a = re.sub(r"""blocks\.(\d+)\.ffn""" , r"""blocks.\1.feed_forward""" , _lowerCAmelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith(""".time_mix_k""" ):
__a = name.replace(""".time_mix_k""" , """.time_mix_key""" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(""".time_mix_v""" ):
__a = name.replace(""".time_mix_v""" , """.time_mix_value""" )
# time_mix_r -> time_mix_key and reshape
if name.endswith(""".time_mix_r""" ):
__a = name.replace(""".time_mix_r""" , """.time_mix_receptance""" )
if name != "head.weight":
__a = """rwkv.""" + name
__a = weight
return state_dict
def UpperCamelCase ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : str=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : int=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" )
__a = 50277
__a = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" )
else:
__a = PreTrainedTokenizerFast(tokenizer_file=_lowerCAmelCase )
__a = len(_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
# 2. Build the config
__a = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
__a = candidate
break
if size is None:
raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
__a = RwkvConfig(
vocab_size=_lowerCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(_lowerCAmelCase )
# 3. Download model file then convert state_dict
__a = hf_hub_download(_lowerCAmelCase , _lowerCAmelCase )
__a = torch.load(_lowerCAmelCase , map_location="""cpu""" )
__a = convert_state_dict(_lowerCAmelCase )
# 4. Split in shards and save
__a , __a = shard_checkpoint(_lowerCAmelCase )
for shard_file, shard in shards.items():
torch.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
if index is not None:
__a = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
# Save the index as well
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
__a = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + """\n"""
f.write(_lowerCAmelCase )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
"""Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.""" )
__a = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
__a = torch.load(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" )
__a = AutoModelForCausalLM.from_pretrained(_lowerCAmelCase )
model.push_to_hub(_lowerCAmelCase , max_shard_size="""2GB""" )
tokenizer.push_to_hub(_lowerCAmelCase )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
__A = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 173 | 1 |
from maths.prime_check import is_prime
def SCREAMING_SNAKE_CASE__ ( _lowercase : Optional[Any] ) -> Dict:
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowercase__ : Any = f"""Input value of [number={number}] must be an integer"""
raise TypeError(__UpperCAmelCase )
if is_prime(__UpperCAmelCase ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 266 |
import torch
from torch import nn
class __UpperCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=False ) -> Tuple:
super().__init__()
a__ = n_token
a__ = d_embed
a__ = d_proj
a__ = cutoffs + [n_token]
a__ = [0] + self.cutoffs
a__ = div_val
a__ = self.cutoffs[0]
a__ = len(self.cutoffs ) - 1
a__ = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
a__ = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
a__ = nn.Parameter(torch.zeros(self.n_clusters ) )
a__ = nn.ModuleList()
a__ = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) )
else:
self.out_projs.append(SCREAMING_SNAKE_CASE )
self.out_layers.append(nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
else:
for i in range(len(self.cutoffs ) ):
a__ , a__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a__ = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) )
self.out_layers.append(nn.Linear(SCREAMING_SNAKE_CASE , r_idx - l_idx ) )
a__ = keep_order
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
if proj is None:
a__ = nn.functional.linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
a__ = nn.functional.linear(SCREAMING_SNAKE_CASE , proj.t().contiguous() )
a__ = nn.functional.linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
if labels is not None:
# Shift so that tokens < n predict n
a__ = hidden[..., :-1, :].contiguous()
a__ = labels[..., 1:].contiguous()
a__ = hidden.view(-1 , hidden.size(-1 ) )
a__ = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
a__ = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
a__ = self._compute_logit(SCREAMING_SNAKE_CASE , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
a__ = labels != -1_0_0
a__ = torch.zeros_like(SCREAMING_SNAKE_CASE , dtype=hidden.dtype , device=hidden.device )
a__ = (
-nn.functional.log_softmax(SCREAMING_SNAKE_CASE , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
a__ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE , dim=-1 )
else:
# construct weights and biases
a__ , a__ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
a__ , a__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a__ = self.out_layers[0].weight[l_idx:r_idx]
a__ = self.out_layers[0].bias[l_idx:r_idx]
else:
a__ = self.out_layers[i].weight
a__ = self.out_layers[i].bias
if i == 0:
a__ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
a__ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(SCREAMING_SNAKE_CASE )
biases.append(SCREAMING_SNAKE_CASE )
a__ , a__ , a__ = weights[0], biases[0], self.out_projs[0]
a__ = self._compute_logit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
a__ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE , dim=1 )
if labels is None:
a__ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
a__ = torch.zeros_like(SCREAMING_SNAKE_CASE , dtype=hidden.dtype , device=hidden.device )
a__ = 0
a__ = [0] + self.cutoffs
for i in range(len(SCREAMING_SNAKE_CASE ) - 1 ):
a__ , a__ = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
a__ = (labels >= l_idx) & (labels < r_idx)
a__ = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
a__ = labels.index_select(0 , SCREAMING_SNAKE_CASE ) - l_idx
a__ = head_logprob.index_select(0 , SCREAMING_SNAKE_CASE )
a__ = hidden.index_select(0 , SCREAMING_SNAKE_CASE )
else:
a__ = hidden
if i == 0:
if labels is not None:
a__ = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
a__ = head_logprob[:, : self.cutoffs[0]]
else:
a__ , a__ , a__ = weights[i], biases[i], self.out_projs[i]
a__ = self._compute_logit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
a__ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE , dim=1 )
a__ = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
a__ = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
a__ = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
a__ = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , SCREAMING_SNAKE_CASE , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]:
if self.n_clusters == 0:
a__ = self._compute_logit(SCREAMING_SNAKE_CASE , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(SCREAMING_SNAKE_CASE , dim=-1 )
else:
# construct weights and biases
a__ , a__ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
a__ , a__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a__ = self.out_layers[0].weight[l_idx:r_idx]
a__ = self.out_layers[0].bias[l_idx:r_idx]
else:
a__ = self.out_layers[i].weight
a__ = self.out_layers[i].bias
if i == 0:
a__ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
a__ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(SCREAMING_SNAKE_CASE )
biases.append(SCREAMING_SNAKE_CASE )
a__ , a__ , a__ = weights[0], biases[0], self.out_projs[0]
a__ = self._compute_logit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
a__ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
a__ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE , dim=1 )
a__ = [0] + self.cutoffs
for i in range(len(SCREAMING_SNAKE_CASE ) - 1 ):
a__ , a__ = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
a__ = head_logprob[:, : self.cutoffs[0]]
else:
a__ , a__ , a__ = weights[i], biases[i], self.out_projs[i]
a__ = self._compute_logit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
a__ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE , dim=1 )
a__ = head_logprob[:, -i] + tail_logprob_i
a__ = logprob_i
return out
| 194 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
@slow
def A ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
a = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
a = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
a = model(__lowerCAmelCase )["last_hidden_state"]
a = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , __lowerCAmelCase )
# compare the actual values for a slice.
a = tf.convert_to_tensor(
[[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 32 |
def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
a = str(bin(UpperCAmelCase__ ) )[2:] # remove the leading "0b"
a = str(bin(UpperCAmelCase__ ) )[2:] # remove the leading "0b"
a = max(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
return "0b" + "".join(
str(int(char_a == "1" and char_b == "1" ) )
for char_a, char_b in zip(a_binary.zfill(UpperCAmelCase__ ) , b_binary.zfill(UpperCAmelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowercase = '''src/diffusers'''
_lowercase = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
_lowercase = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowercase = spec.loader.load_module()
def _A (UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] ) ->str:
'''simple docstring'''
return line.startswith(UpperCamelCase ) or len(UpperCamelCase ) <= 1 or re.search(r"""^\s*\)(\s*->.*:|:)\s*$""" , UpperCamelCase ) is not None
def _A (UpperCamelCase : List[Any] ) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__ : str = object_name.split(""".""" )
lowerCamelCase__ : Tuple = 0
# First let's find the module where our object lives.
lowerCamelCase__ : int = parts[i]
while i < len(UpperCamelCase ) and not os.path.isfile(os.path.join(UpperCamelCase , f"{module}.py" ) ):
i += 1
if i < len(UpperCamelCase ):
lowerCamelCase__ : Optional[Any] = os.path.join(UpperCamelCase , parts[i] )
if i >= len(UpperCamelCase ):
raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}." )
with open(os.path.join(UpperCamelCase , f"{module}.py" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ : Union[str, Any] = f.readlines()
# Now let's find the class / func in the code!
lowerCamelCase__ : str = """"""
lowerCamelCase__ : List[str] = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCamelCase ) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCamelCase ):
raise ValueError(f" {object_name} does not match any function or class in {module}." )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
lowerCamelCase__ : Optional[int] = line_index
while line_index < len(UpperCamelCase ) and _should_continue(lines[line_index] , UpperCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
lowerCamelCase__ : Optional[Any] = lines[start_index:line_index]
return "".join(UpperCamelCase )
_lowercase = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_lowercase = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_lowercase = re.compile(r'''<FILL\s+[^>]*>''')
def _A (UpperCamelCase : Tuple ) ->List[Any]:
'''simple docstring'''
lowerCamelCase__ : Tuple = code.split("""\n""" )
lowerCamelCase__ : List[Any] = 0
while idx < len(UpperCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCamelCase ):
return re.search(r"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def _A (UpperCamelCase : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = len(get_indent(UpperCamelCase ) ) > 0
if has_indent:
lowerCamelCase__ : List[str] = f"class Bla:\n{code}"
lowerCamelCase__ : Dict = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=UpperCamelCase )
lowerCamelCase__ : Optional[Any] = black.format_str(UpperCamelCase , mode=UpperCamelCase )
lowerCamelCase__ ,lowerCamelCase__ : Dict = style_docstrings_in_code(UpperCamelCase )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def _A (UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any]=False ) ->Tuple:
'''simple docstring'''
with open(UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ : List[Any] = f.readlines()
lowerCamelCase__ : Tuple = []
lowerCamelCase__ : Any = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCamelCase ):
lowerCamelCase__ : List[Any] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ : List[str] = search.groups()
lowerCamelCase__ : Tuple = find_code_in_diffusers(UpperCamelCase )
lowerCamelCase__ : int = get_indent(UpperCamelCase )
lowerCamelCase__ : Tuple = line_index + 1 if indent == theoretical_indent else line_index + 2
lowerCamelCase__ : Optional[Any] = theoretical_indent
lowerCamelCase__ : Optional[int] = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
lowerCamelCase__ : Optional[Any] = True
while line_index < len(UpperCamelCase ) and should_continue:
line_index += 1
if line_index >= len(UpperCamelCase ):
break
lowerCamelCase__ : Dict = lines[line_index]
lowerCamelCase__ : List[Any] = _should_continue(UpperCamelCase , UpperCamelCase ) and re.search(f"^{indent}# End copy" , UpperCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
lowerCamelCase__ : List[str] = lines[start_index:line_index]
lowerCamelCase__ : Union[str, Any] = """""".join(UpperCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
lowerCamelCase__ : Optional[int] = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(UpperCamelCase ) is None]
lowerCamelCase__ : Optional[int] = """\n""".join(UpperCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCamelCase ) > 0:
lowerCamelCase__ : Any = replace_pattern.replace("""with""" , """""" ).split(""",""" )
lowerCamelCase__ : Dict = [_re_replace_pattern.search(UpperCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ : Tuple = pattern.groups()
lowerCamelCase__ : List[Any] = re.sub(UpperCamelCase , UpperCamelCase , UpperCamelCase )
if option.strip() == "all-casing":
lowerCamelCase__ : Optional[Any] = re.sub(obja.lower() , obja.lower() , UpperCamelCase )
lowerCamelCase__ : List[Any] = re.sub(obja.upper() , obja.upper() , UpperCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
lowerCamelCase__ : Union[str, Any] = blackify(lines[start_index - 1] + theoretical_code )
lowerCamelCase__ : Optional[Any] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
lowerCamelCase__ : int = lines[:start_index] + [theoretical_code] + lines[line_index:]
lowerCamelCase__ : Tuple = start_index + 1
if overwrite and len(UpperCamelCase ) > 0:
# Warn the user a file has been modified.
print(f"Detected changes, rewriting {filename}." )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCamelCase )
return diffs
def _A (UpperCamelCase : bool = False ) ->Any:
'''simple docstring'''
lowerCamelCase__ : str = glob.glob(os.path.join(UpperCamelCase , """**/*.py""" ) , recursive=UpperCamelCase )
lowerCamelCase__ : Optional[int] = []
for filename in all_files:
lowerCamelCase__ : Tuple = is_copy_consistent(UpperCamelCase , UpperCamelCase )
diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
if not overwrite and len(UpperCamelCase ) > 0:
lowerCamelCase__ : Dict = """\n""".join(UpperCamelCase )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_lowercase = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 157 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowercase = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
_lowercase = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
_lowercase = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_lowercase = '''facebook'''
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
_lowercase = '''allenai'''
def _A (UpperCamelCase : Tuple ) ->Any:
'''simple docstring'''
lowerCamelCase__ : str = dict((re.sub(r"""@@$""" , """""" , UpperCamelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , UpperCamelCase ), v) for k, v in d.items() )
lowerCamelCase__ : List[Any] = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[f"{k}</w>"]
lowerCamelCase__ : List[str] = d[k] # restore
return da
def _A (UpperCamelCase : str , UpperCamelCase : List[str] ) ->int:
'''simple docstring'''
assert os.path.exists(UpperCamelCase )
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
print(f"Writing results to {pytorch_dump_folder_path}" )
# handle various types of models
lowerCamelCase__ : str = basename(UpperCamelCase )
lowerCamelCase__ : str = dirname(UpperCamelCase )
lowerCamelCase__ : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCamelCase__ : Tuple = cls.hub_models()
lowerCamelCase__ : Optional[int] = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
lowerCamelCase__ : List[Any] = """."""
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"using checkpoint {checkpoint_file}" )
lowerCamelCase__ : List[str] = hub_utils.from_pretrained(
UpperCamelCase , UpperCamelCase , UpperCamelCase , archive_map=UpperCamelCase , **UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = vars(chkpt["""args"""]["""model"""] )
lowerCamelCase__ : Union[str, Any] = args["""source_lang"""]
lowerCamelCase__ : Dict = args["""target_lang"""]
lowerCamelCase__ : List[Any] = dirname(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = basename(UpperCamelCase )
# dicts
lowerCamelCase__ : int = os.path.join(UpperCamelCase , f"dict.{src_lang}.txt" )
lowerCamelCase__ : Optional[Any] = os.path.join(UpperCamelCase , f"dict.{tgt_lang}.txt" )
lowerCamelCase__ : Any = Dictionary.load(UpperCamelCase )
lowerCamelCase__ : List[Any] = rewrite_dict_keys(src_dict.indices )
lowerCamelCase__ : Optional[Any] = len(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = os.path.join(UpperCamelCase , """vocab-src.json""" )
print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCamelCase , ensure_ascii=UpperCamelCase , indent=UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCamelCase__ : Dict = True
for k in src_vocab.keys():
if not k.islower():
lowerCamelCase__ : Tuple = False
break
lowerCamelCase__ : Union[str, Any] = Dictionary.load(UpperCamelCase )
lowerCamelCase__ : List[str] = rewrite_dict_keys(tgt_dict.indices )
lowerCamelCase__ : str = len(UpperCamelCase )
lowerCamelCase__ : List[str] = os.path.join(UpperCamelCase , """vocab-tgt.json""" )
print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCamelCase , ensure_ascii=UpperCamelCase , indent=UpperCamelCase ) )
# merges_file (bpecodes)
lowerCamelCase__ : List[str] = os.path.join(UpperCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCamelCase__ : Union[str, Any] = os.path.join(UpperCamelCase , UpperCamelCase )
if os.path.exists(UpperCamelCase ):
break
with open(UpperCamelCase , encoding="""utf-8""" ) as fin:
lowerCamelCase__ : Dict = fin.read()
lowerCamelCase__ : Tuple = re.sub(r""" \d+$""" , """""" , UpperCamelCase , 0 , re.M ) # remove frequency number
print(f"Generating {merges_file}" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as fout:
fout.write(UpperCamelCase )
# model config
lowerCamelCase__ : Optional[Any] = os.path.join(UpperCamelCase , """config.json""" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"
lowerCamelCase__ : str = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
lowerCamelCase__ : Dict = 5
lowerCamelCase__ : Optional[Any] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCamelCase__ : Any = best_score_hparams[model_dir]["""length_penalty"""]
else:
lowerCamelCase__ : Union[str, Any] = 1.0
print(f"Generating {fsmt_model_config_file}" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCamelCase , ensure_ascii=UpperCamelCase , indent=UpperCamelCase ) )
# tokenizer config
lowerCamelCase__ : Union[str, Any] = os.path.join(UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Any = {
"""langs""": [src_lang, tgt_lang],
"""model_max_length""": 1024,
"""do_lower_case""": do_lower_case,
}
print(f"Generating {fsmt_tokenizer_config_file}" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCamelCase , ensure_ascii=UpperCamelCase , indent=UpperCamelCase ) )
# model
lowerCamelCase__ : Tuple = chkpt["""models"""][0]
lowerCamelCase__ : Tuple = model.state_dict()
# rename keys to start with 'model.'
lowerCamelCase__ : Tuple = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCamelCase__ : int = [
"""model.model""",
"""model.encoder.version""",
"""model.decoder.version""",
"""model.encoder_embed_tokens.weight""",
"""model.decoder_embed_tokens.weight""",
"""model.encoder.embed_positions._float_tensor""",
"""model.decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
model_state_dict.pop(UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Tuple = FSMTConfig.from_pretrained(UpperCamelCase )
lowerCamelCase__ : List[str] = FSMTForConditionalGeneration(UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
# save
lowerCamelCase__ : Any = os.path.join(UpperCamelCase , UpperCamelCase )
print(f"Generating {pytorch_weights_dump_path}" )
torch.save(UpperCamelCase , UpperCamelCase )
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(f"cd {data_root}" )
print(f"transformers-cli upload {model_dir}" )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 157 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> list:
if len(UpperCamelCase ) <= 1:
return [tuple(UpperCamelCase )]
UpperCAmelCase_ : str = []
def generate(UpperCamelCase ,UpperCamelCase ):
UpperCAmelCase_ : List[str] = [0] * n
res.append(tuple(UpperCamelCase ) )
UpperCAmelCase_ : str = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = arr[i], arr[0]
else:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = arr[i], arr[c[i]]
res.append(tuple(UpperCamelCase ) )
c[i] += 1
UpperCAmelCase_ : Optional[Any] = 0
else:
UpperCAmelCase_ : Dict = 0
i += 1
generate(len(UpperCamelCase ) ,UpperCamelCase )
return res
if __name__ == "__main__":
lowerCAmelCase__ = input("Enter numbers separated by a comma:\n").strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 471 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
UpperCAmelCase_ : Any = np.argmax(UpperCamelCase ,axis=1 )
return np.sum(outputs == labels )
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> List[str]:
with open(UpperCamelCase ,encoding='utf_8' ) as f:
UpperCAmelCase_ : int = csv.reader(UpperCamelCase )
UpperCAmelCase_ : Tuple = []
next(UpperCamelCase ) # skip the first line
for line in tqdm(UpperCamelCase ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : str = []
for dataset in encoded_datasets:
UpperCAmelCase_ : str = len(UpperCamelCase )
UpperCAmelCase_ : str = np.zeros((n_batch, 2, input_len) ,dtype=np.intaa )
UpperCAmelCase_ : List[Any] = np.zeros((n_batch, 2) ,dtype=np.intaa )
UpperCAmelCase_ : Any = np.full((n_batch, 2, input_len) ,fill_value=-1_0_0 ,dtype=np.intaa )
UpperCAmelCase_ : List[Any] = np.zeros((n_batch,) ,dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
UpperCAmelCase_ : List[Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
UpperCAmelCase_ : Any = with_conta
UpperCAmelCase_ : Tuple = with_conta
UpperCAmelCase_ : Union[str, Any] = len(UpperCamelCase ) - 1
UpperCAmelCase_ : int = len(UpperCamelCase ) - 1
UpperCAmelCase_ : Any = with_conta
UpperCAmelCase_ : Any = with_conta
UpperCAmelCase_ : Optional[int] = mc_label
UpperCAmelCase_ : Tuple = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(UpperCamelCase ) for t in all_inputs ) )
return tensor_datasets
def SCREAMING_SNAKE_CASE( ) -> str:
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--model_name' ,type=UpperCamelCase ,default='openai-gpt' ,help='pretrained model name' )
parser.add_argument('--do_train' ,action='store_true' ,help='Whether to run training.' )
parser.add_argument('--do_eval' ,action='store_true' ,help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' ,default=UpperCamelCase ,type=UpperCamelCase ,required=UpperCamelCase ,help='The output directory where the model predictions and checkpoints will be written.' ,)
parser.add_argument('--train_dataset' ,type=UpperCamelCase ,default='' )
parser.add_argument('--eval_dataset' ,type=UpperCamelCase ,default='' )
parser.add_argument('--seed' ,type=UpperCamelCase ,default=4_2 )
parser.add_argument('--num_train_epochs' ,type=UpperCamelCase ,default=3 )
parser.add_argument('--train_batch_size' ,type=UpperCamelCase ,default=8 )
parser.add_argument('--eval_batch_size' ,type=UpperCamelCase ,default=1_6 )
parser.add_argument('--adam_epsilon' ,default=1e-8 ,type=UpperCamelCase ,help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' ,type=UpperCamelCase ,default=1 )
parser.add_argument(
'--max_steps' ,default=-1 ,type=UpperCamelCase ,help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) ,)
parser.add_argument(
'--gradient_accumulation_steps' ,type=UpperCamelCase ,default=1 ,help='Number of updates steps to accumulate before performing a backward/update pass.' ,)
parser.add_argument('--learning_rate' ,type=UpperCamelCase ,default=6.25e-5 )
parser.add_argument('--warmup_steps' ,default=0 ,type=UpperCamelCase ,help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' ,type=UpperCamelCase ,default='warmup_linear' )
parser.add_argument('--weight_decay' ,type=UpperCamelCase ,default=0.01 )
parser.add_argument('--lm_coef' ,type=UpperCamelCase ,default=0.9 )
parser.add_argument('--n_valid' ,type=UpperCamelCase ,default=3_7_4 )
parser.add_argument('--server_ip' ,type=UpperCamelCase ,default='' ,help='Can be used for distant debugging.' )
parser.add_argument('--server_port' ,type=UpperCamelCase ,default='' ,help='Can be used for distant debugging.' )
UpperCAmelCase_ : Optional[Any] = parser.parse_args()
print(UpperCamelCase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) ,redirect_output=UpperCamelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
UpperCAmelCase_ : Union[str, Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
UpperCAmelCase_ : Dict = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(UpperCamelCase ,UpperCamelCase ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
UpperCAmelCase_ : Optional[int] = ['_start_', '_delimiter_', '_classify_']
UpperCAmelCase_ : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(UpperCamelCase ) )
model.to(UpperCamelCase )
# Load and encode the datasets
def tokenize_and_encode(UpperCamelCase ):
if isinstance(UpperCamelCase ,UpperCamelCase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(UpperCamelCase ) )
elif isinstance(UpperCamelCase ,UpperCamelCase ):
return obj
return [tokenize_and_encode(UpperCamelCase ) for o in obj]
logger.info('Encoding dataset...' )
UpperCAmelCase_ : List[str] = load_rocstories_dataset(args.train_dataset )
UpperCAmelCase_ : Optional[Any] = load_rocstories_dataset(args.eval_dataset )
UpperCAmelCase_ : Dict = (train_dataset, eval_dataset)
UpperCAmelCase_ : Union[str, Any] = tokenize_and_encode(UpperCamelCase )
# Compute the max input length for the Transformer
UpperCAmelCase_ : Dict = model.config.n_positions // 2 - 2
UpperCAmelCase_ : int = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) ,len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
UpperCAmelCase_ : Dict = min(UpperCamelCase ,model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
UpperCAmelCase_ : Optional[int] = pre_process_datasets(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,*UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = tensor_datasets[0], tensor_datasets[1]
UpperCAmelCase_ : Optional[int] = TensorDataset(*UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = RandomSampler(UpperCamelCase )
UpperCAmelCase_ : int = DataLoader(UpperCamelCase ,sampler=UpperCamelCase ,batch_size=args.train_batch_size )
UpperCAmelCase_ : int = TensorDataset(*UpperCamelCase )
UpperCAmelCase_ : Optional[int] = SequentialSampler(UpperCamelCase )
UpperCAmelCase_ : Tuple = DataLoader(UpperCamelCase ,sampler=UpperCamelCase ,batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
UpperCAmelCase_ : List[str] = args.max_steps
UpperCAmelCase_ : List[str] = args.max_steps // (len(UpperCamelCase ) // args.gradient_accumulation_steps) + 1
else:
UpperCAmelCase_ : List[str] = len(UpperCamelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
UpperCAmelCase_ : Any = list(model.named_parameters() )
UpperCAmelCase_ : Any = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
UpperCAmelCase_ : Dict = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
UpperCAmelCase_ : Any = AdamW(UpperCamelCase ,lr=args.learning_rate ,eps=args.adam_epsilon )
UpperCAmelCase_ : List[str] = get_linear_schedule_with_warmup(
UpperCamelCase ,num_warmup_steps=args.warmup_steps ,num_training_steps=UpperCamelCase )
if args.do_train:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) ,desc='Epoch' ):
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : List[Any] = tqdm(UpperCamelCase ,desc='Training' )
for step, batch in enumerate(UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = tuple(t.to(UpperCamelCase ) for t in batch )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = batch
UpperCAmelCase_ : Dict = model(UpperCamelCase ,mc_token_ids=UpperCamelCase ,lm_labels=UpperCamelCase ,mc_labels=UpperCamelCase )
UpperCAmelCase_ : int = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
UpperCAmelCase_ : Dict = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
UpperCAmelCase_ : List[Any] = 'Training loss: {:.2e} lr: {:.2e}'.format(UpperCamelCase ,scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
UpperCAmelCase_ : int = model.module if hasattr(UpperCamelCase ,'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
UpperCAmelCase_ : int = os.path.join(args.output_dir ,UpperCamelCase )
UpperCAmelCase_ : Any = os.path.join(args.output_dir ,UpperCamelCase )
torch.save(model_to_save.state_dict() ,UpperCamelCase )
model_to_save.config.to_json_file(UpperCamelCase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
UpperCAmelCase_ : int = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
UpperCAmelCase_ : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(UpperCamelCase )
if args.do_eval:
model.eval()
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = 0, 0
UpperCAmelCase_ , UpperCAmelCase_ : Any = 0, 0
for batch in tqdm(UpperCamelCase ,desc='Evaluating' ):
UpperCAmelCase_ : Union[str, Any] = tuple(t.to(UpperCamelCase ) for t in batch )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = batch
with torch.no_grad():
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = model(
UpperCamelCase ,mc_token_ids=UpperCamelCase ,lm_labels=UpperCamelCase ,mc_labels=UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = mc_logits.detach().cpu().numpy()
UpperCAmelCase_ : int = mc_labels.to('cpu' ).numpy()
UpperCAmelCase_ : Optional[int] = accuracy(UpperCamelCase ,UpperCamelCase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
UpperCAmelCase_ : str = eval_loss / nb_eval_steps
UpperCAmelCase_ : str = eval_accuracy / nb_eval_examples
UpperCAmelCase_ : List[Any] = tr_loss / nb_tr_steps if args.do_train else None
UpperCAmelCase_ : Dict = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
UpperCAmelCase_ : Union[str, Any] = os.path.join(args.output_dir ,'eval_results.txt' )
with open(UpperCamelCase ,'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' ,UpperCamelCase ,str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 471 | 1 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
a : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(_lowercase )
class lowercase(_lowercase ):
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
requires_backends(self , 'vision' )
self.check_model_type(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowercase__ ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
return {}, {}, {}
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
a__ = load_image(__SCREAMING_SNAKE_CASE )
a__ = image.size
a__ = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
return model_inputs
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
a__ = self.model(**__SCREAMING_SNAKE_CASE )
return model_outputs
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
a__ = model_outputs.predicted_depth
a__ = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='bicubic' , align_corners=__SCREAMING_SNAKE_CASE )
a__ = prediction.squeeze().cpu().numpy()
a__ = (output * 2_5_5 / np.max(__SCREAMING_SNAKE_CASE )).astype('uint8' )
a__ = Image.fromarray(__SCREAMING_SNAKE_CASE )
a__ = {}
a__ = predicted_depth
a__ = depth
return output_dict
| 273 |
"""simple docstring"""
from math import ceil
def __magic_name__ ( UpperCamelCase : int = 1001 ) -> int:
a__ = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
a__ = 2 * i + 1
a__ = 2 * i
a__ = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
a : Any = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 273 | 1 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ ( lowercase_ ,unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = ProphetNetTokenizer
_UpperCamelCase = False
def _UpperCAmelCase ( self ):
super().setUp()
_lowerCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _UpperCAmelCase ( self , a__ ):
_lowerCamelCase = '''UNwant\u00E9d,running'''
_lowerCamelCase = '''unwanted, running'''
return input_text, output_text
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.tokenizer_class(self.vocab_file )
_lowerCamelCase = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(a__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [9, 6, 7, 12, 10, 11] )
def _UpperCAmelCase ( self ):
_lowerCamelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def _UpperCAmelCase ( self ):
_lowerCamelCase = BasicTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _UpperCAmelCase ( self ):
_lowerCamelCase = BasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def _UpperCAmelCase ( self ):
_lowerCamelCase = BasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _UpperCAmelCase ( self ):
_lowerCamelCase = BasicTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _UpperCAmelCase ( self ):
_lowerCamelCase = BasicTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _UpperCAmelCase ( self ):
_lowerCamelCase = BasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _UpperCAmelCase ( self ):
_lowerCamelCase = BasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _UpperCAmelCase ( self ):
_lowerCamelCase = BasicTokenizer(do_lower_case=a__ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def _UpperCAmelCase ( self ):
_lowerCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
_lowerCamelCase = {}
for i, token in enumerate(a__ ):
_lowerCamelCase = i
_lowerCamelCase = WordpieceTokenizer(vocab=a__ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
@require_torch
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
_lowerCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_lowerCamelCase = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
_lowerCamelCase = tokenizer(a__ , padding=a__ , return_tensors='''pt''' )
self.assertIsInstance(a__ , a__ )
_lowerCamelCase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(a__ , a__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def _UpperCAmelCase ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def _UpperCAmelCase ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def _UpperCAmelCase ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
_lowerCamelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=a__ )
_lowerCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=a__ )
_lowerCamelCase = tokenizer.build_inputs_with_special_tokens(a__ )
_lowerCamelCase = tokenizer.build_inputs_with_special_tokens(a__ , a__ )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
| 297 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_UpperCAmelCase = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def _lowerCamelCase ( _a ):
"""simple docstring"""
_lowerCamelCase = test_results.split(''' ''' )
_lowerCamelCase = 0
_lowerCamelCase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_lowerCamelCase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(_a ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def _lowerCamelCase ( _a ):
"""simple docstring"""
_lowerCamelCase = {}
_lowerCamelCase = None
_lowerCamelCase = False
for line in failures_short_lines.split('''\n''' ):
if re.search(R'''_ \[doctest\]''' , _a ):
_lowerCamelCase = True
_lowerCamelCase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
_lowerCamelCase = line
_lowerCamelCase = False
return failures
class __magic_name__ :
"""simple docstring"""
def __init__( self , a__ , a__ ):
_lowerCamelCase = title
_lowerCamelCase = doc_test_results['''time_spent'''].split(''',''' )[0]
_lowerCamelCase = doc_test_results['''success''']
_lowerCamelCase = doc_test_results['''failures''']
_lowerCamelCase = self.n_success + self.n_failures
# Failures and success of the modeling tests
_lowerCamelCase = doc_test_results
@property
def _UpperCAmelCase ( self ):
_lowerCamelCase = [self._time_spent]
_lowerCamelCase = 0
for time in time_spent:
_lowerCamelCase = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(a__ ) == 1:
_lowerCamelCase = [0, 0, time_parts[0]]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'''{int(a__ )}h{int(a__ )}m{int(a__ )}s'''
@property
def _UpperCAmelCase ( self ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCAmelCase ( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def _UpperCAmelCase ( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def _UpperCAmelCase ( self ):
_lowerCamelCase = 40
_lowerCamelCase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(a__ , a__ )}
_lowerCamelCase = ''''''
for category, failures in category_failures.items():
if len(a__ ) == 0:
continue
if report != "":
report += "\n\n"
report += f'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(a__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def _UpperCAmelCase ( self ):
_lowerCamelCase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(a__ )
@staticmethod
def _UpperCAmelCase ( ):
_lowerCamelCase = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(a__ )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=a__ , )
def _UpperCAmelCase ( self ):
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
_lowerCamelCase = f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else '''All tests passed.'''
_lowerCamelCase = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=a__ , )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ ):
_lowerCamelCase = ''''''
for key, value in failures.items():
_lowerCamelCase = value[:2_00] + ''' [Truncated]''' if len(a__ ) > 2_50 else value
failures_text += f'''*{key}*\n_{value}_\n\n'''
_lowerCamelCase = job_name
_lowerCamelCase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
_lowerCamelCase = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCAmelCase ( self ):
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
_lowerCamelCase = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
_lowerCamelCase = sorted(self.doc_test_results.items() , key=lambda a__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
_lowerCamelCase = f'''*Num failures* :{len(job_result['failed'] )} \n'''
_lowerCamelCase = job_result['''failures''']
_lowerCamelCase = self.get_reply_blocks(a__ , a__ , a__ , text=a__ )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=f'''Results for {job}''' , blocks=a__ , thread_ts=self.thread_ts['''ts'''] , )
time.sleep(1 )
def _lowerCamelCase ( ):
"""simple docstring"""
_lowerCamelCase = os.environ['''GITHUB_RUN_ID''']
_lowerCamelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
_lowerCamelCase = requests.get(_a ).json()
_lowerCamelCase = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
_lowerCamelCase = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(_a ):
_lowerCamelCase = requests.get(url + F'''&page={i + 2}''' ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , _a )
return {}
def _lowerCamelCase ( _a ):
"""simple docstring"""
_lowerCamelCase = {}
if os.path.exists(_a ):
_lowerCamelCase = os.listdir(_a )
for file in files:
try:
with open(os.path.join(_a , _a ) , encoding='''utf-8''' ) as f:
_lowerCamelCase = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'''Could not open {os.path.join(_a , _a )}.''' ) from e
return _artifact
def _lowerCamelCase ( ):
"""simple docstring"""
class __magic_name__ :
"""simple docstring"""
def __init__( self , a__ ):
_lowerCamelCase = name
_lowerCamelCase = []
def __str__( self ):
return self.name
def _UpperCAmelCase ( self , a__ ):
self.paths.append({'''name''': self.name, '''path''': path} )
_lowerCamelCase = {}
_lowerCamelCase = filter(os.path.isdir , os.listdir() )
for directory in directories:
_lowerCamelCase = directory
if artifact_name not in _available_artifacts:
_lowerCamelCase = Artifact(_a )
_available_artifacts[artifact_name].add_path(_a )
return _available_artifacts
if __name__ == "__main__":
_UpperCAmelCase = get_job_links()
_UpperCAmelCase = retrieve_available_artifacts()
_UpperCAmelCase = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_UpperCAmelCase = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_UpperCAmelCase = github_actions_job_links.get("run_doctests")
_UpperCAmelCase = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_UpperCAmelCase = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = handle_test_results(artifact["stats"])
_UpperCAmelCase = failed
_UpperCAmelCase = success
_UpperCAmelCase = time_spent[1:-1] + ", "
_UpperCAmelCase = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_UpperCAmelCase = line.replace("FAILED ", "")
_UpperCAmelCase = line.split()[0].replace("\n", "")
if "::" in line:
_UpperCAmelCase , _UpperCAmelCase = line.split("::")
else:
_UpperCAmelCase , _UpperCAmelCase = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_UpperCAmelCase = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_UpperCAmelCase = all_failures[test] if test in all_failures else "N/A"
_UpperCAmelCase = failure
break
_UpperCAmelCase = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 297 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 687 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ):
_lowerCAmelCase :Optional[int] = parent
_lowerCAmelCase :Dict = batch_size
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :Optional[Any] = patch_size
_lowerCAmelCase :List[Any] = num_channels
_lowerCAmelCase :Optional[int] = embed_dim
_lowerCAmelCase :List[str] = hidden_sizes
_lowerCAmelCase :Union[str, Any] = depths
_lowerCAmelCase :int = num_heads
_lowerCAmelCase :Any = window_size
_lowerCAmelCase :List[Any] = mlp_ratio
_lowerCAmelCase :Optional[int] = qkv_bias
_lowerCAmelCase :Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase :Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase :Dict = drop_path_rate
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :Tuple = use_absolute_embeddings
_lowerCAmelCase :Optional[int] = patch_norm
_lowerCAmelCase :Optional[Any] = layer_norm_eps
_lowerCAmelCase :Union[str, Any] = initializer_range
_lowerCAmelCase :List[str] = is_training
_lowerCAmelCase :str = scope
_lowerCAmelCase :Optional[int] = use_labels
_lowerCAmelCase :List[Any] = type_sequence_label_size
_lowerCAmelCase :Union[str, Any] = encoder_stride
_lowerCAmelCase :Optional[int] = out_features
_lowerCAmelCase :List[str] = out_indices
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase :Dict = None
if self.use_labels:
_lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase :str = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self: int ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ):
_lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCAmelCase :Optional[int] = None
_lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase :List[Any] = 1
_lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :int = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size
_lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase :Optional[int] = 1
_lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs
_lowerCAmelCase :List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : Optional[Any] = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Any = False
lowerCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = FocalNetModelTester(self )
_lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def SCREAMING_SNAKE_CASE__ ( self: str ):
pass
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Tuple = model_class(_UpperCAmelCase )
_lowerCAmelCase :Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase :int = [*signature.parameters.keys()]
_lowerCAmelCase :List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_lowerCAmelCase :List[Any] = outputs.hidden_states
_lowerCAmelCase :str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# FocalNet has a different seq_length
_lowerCAmelCase :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase :List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape
_lowerCAmelCase :Optional[int] = (
reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Dict = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :str = 3
_lowerCAmelCase :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase :int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :List[str] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Union[str, Any] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase :str = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.default_image_processor
_lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase :Dict = model(**_UpperCAmelCase )
# verify the logits
_lowerCAmelCase :str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase : str = FocalNetConfig
lowerCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Any = FocalNetModelTester(self ) | 687 | 1 |
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [[0 for _ in range(__a )] for _ in range(m + 1 )]
for i in range(m + 1 ):
_UpperCAmelCase = 1
for n in range(m + 1 ):
for k in range(1 ,__a ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase__ = int(input("""Enter a number: """).strip())
print(partition(n))
except ValueError:
print("""Please enter a number.""")
else:
try:
UpperCAmelCase__ = int(sys.argv[1])
print(partition(n))
except ValueError:
print("""Please pass a number.""")
| 720 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
def __init__( self : int , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : List[str] ):
warnings.warn(
"""The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ChineseCLIPImageProcessor instead.""" , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
| 275 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.