code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
from math import factorial
def UpperCamelCase_( _snake_case : int = 100 ):
"""simple docstring"""
return sum(int(_snake_case ) for x in str(factorial(_snake_case ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 308
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
@staticmethod
def __magic_name__ ( *__snake_case , **__snake_case ) -> List[str]:
'''simple docstring'''
pass
def UpperCamelCase_( _snake_case : Image ):
"""simple docstring"""
__a =hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> Dict:
'''simple docstring'''
__a =DepthEstimationPipeline(model=__snake_case , image_processor=__snake_case )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __magic_name__ ( self , __snake_case , __snake_case ) -> Tuple:
'''simple docstring'''
__a =depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , __snake_case )
import datasets
__a =datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
__a =depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , __snake_case , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
@require_torch
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a ='Intel/dpt-large'
__a =pipeline('depth-estimation' , model=__snake_case )
__a =depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
__a =hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 308
| 1
|
_lowerCAmelCase : Dict = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : Tuple , _snake_case : Any , _snake_case : Dict ):
"""simple docstring"""
__a =[False] * len(_snake_case )
__a =[s]
__a =True
while queue:
__a =queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_snake_case )
__a =True
__a =u
return visited[t]
def UpperCamelCase_( _snake_case : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] ):
"""simple docstring"""
__a =[-1] * (len(_snake_case ))
__a =0
__a =[]
__a =[i[:] for i in graph] # Record original cut, copy.
while bfs(_snake_case , _snake_case , _snake_case , _snake_case ):
__a =float('Inf' )
__a =sink
while s != source:
# Find the minimum value in select path
__a =min(_snake_case , graph[parent[s]][s] )
__a =parent[s]
max_flow += path_flow
__a =sink
while v != source:
__a =parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__a =parent[v]
for i in range(len(_snake_case ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 308
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
_lowerCAmelCase : Any = "pytorch_model.bin"
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The name of the task to train on.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Random seed for initialization.'} , )
def UpperCamelCase_( _snake_case : int , _snake_case : str , _snake_case : Optional[int] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : List[Any] ):
"""simple docstring"""
__a =datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
__a =dataset.filter(lambda _snake_case : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__a =int(eval_result * len(_snake_case ) )
print(_snake_case )
__a =dataset.sort('probability' , reverse=_snake_case )
__a =dataset.select(range(_snake_case ) )
__a =dataset.remove_columns(['label', 'probability'] )
__a =dataset.rename_column('prediction' , 'label' )
__a =dataset.map(lambda _snake_case : {"label": idalabel[example["label"]]} )
__a =dataset.shuffle(seed=args.seed )
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(_snake_case , index=_snake_case )
else:
dataset.to_json(_snake_case )
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : str , _snake_case : int , _snake_case : Optional[int] , **_snake_case : List[str] ):
"""simple docstring"""
__a =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__a =STModelArguments(model_name_or_path=_snake_case )
__a =STDataArguments(train_file=_snake_case , infer_file=_snake_case )
__a =STTrainingArguments(output_dir=_snake_case )
__a =argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_snake_case ).items():
setattr(_snake_case , _snake_case , _snake_case )
for key, value in kwargs.items():
if hasattr(_snake_case , _snake_case ):
setattr(_snake_case , _snake_case , _snake_case )
# Sanity checks
__a ={}
__a =None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__a =args.train_file
__a =args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__a =args.eval_file
for key in data_files:
__a =data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
__a =extension
else:
assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file`.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
__a =F'{args.output_dir}/self-train_iter-{{}}'.format
__a =data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_snake_case )
os.makedirs(_snake_case , exist_ok=_snake_case )
accelerator.wait_for_everyone()
__a =None
__a =None
__a =0
__a =False
# Show the progress bar
__a =tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__a =data_dir_format(_snake_case )
assert os.path.exists(_snake_case )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__a =os.path.join(_snake_case , 'stage-1' )
__a ={
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_snake_case , _snake_case ):
arguments_dict.update({key: value} )
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , _snake_case )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__a =os.path.join(_snake_case , 'best-checkpoint' )
__a =os.path.join(_snake_case , 'stage-2' )
# Update arguments_dict
__a =model_path
__a =data_files['train']
__a =current_output_dir
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , _snake_case )
__a =iteration
__a =data_dir_format(iteration + 1 )
__a =AutoConfig.from_pretrained(os.path.join(_snake_case , 'best-checkpoint' ) )
__a =config.idalabel
__a =os.path.join(_snake_case , 'eval_results_best-checkpoint.json' )
__a =os.path.join(_snake_case , 'test_results_best-checkpoint.json' )
assert os.path.exists(_snake_case )
with open(_snake_case , 'r' ) as f:
__a =float(json.load(_snake_case )[args.eval_metric] )
__a =os.path.join(_snake_case , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(_snake_case )
# Loading the dataset from local csv or json files.
__a =load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
__a =load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(_snake_case , exist_ok=_snake_case )
shutil.copy(_snake_case , os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(_snake_case ):
shutil.copy(_snake_case , os.path.join(_snake_case , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
accelerator.wait_for_everyone()
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__a =eval_result
if best_iteration is None:
__a =new_iteration
__a =new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__a =new_iteration
__a =new_eval_result
__a =0
else:
if new_eval_result == best_eval_result:
__a =new_iteration
__a =new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__a =True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , _snake_case )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
| 308
| 1
|
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class __magic_name__ :
def __init__( self , __snake_case , __snake_case ) -> List[Any]:
'''simple docstring'''
__a =question_encoder
__a =generator
__a =self.question_encoder
def __magic_name__ ( self , __snake_case ) -> Union[str, Any]:
'''simple docstring'''
if os.path.isfile(__snake_case ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(__snake_case , exist_ok=__snake_case )
__a =os.path.join(__snake_case , 'question_encoder_tokenizer' )
__a =os.path.join(__snake_case , 'generator_tokenizer' )
self.question_encoder.save_pretrained(__snake_case )
self.generator.save_pretrained(__snake_case )
@classmethod
def __magic_name__ ( cls , __snake_case , **__snake_case ) -> Tuple:
'''simple docstring'''
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
__a =kwargs.pop('config' , __snake_case )
if config is None:
__a =RagConfig.from_pretrained(__snake_case )
__a =AutoTokenizer.from_pretrained(
__snake_case , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
__a =AutoTokenizer.from_pretrained(
__snake_case , config=config.generator , subfolder='generator_tokenizer' )
return cls(question_encoder=__snake_case , generator=__snake_case )
def __call__( self , *__snake_case , **__snake_case ) -> Union[str, Any]:
'''simple docstring'''
return self.current_tokenizer(*__snake_case , **__snake_case )
def __magic_name__ ( self , *__snake_case , **__snake_case ) -> List[Any]:
'''simple docstring'''
return self.generator.batch_decode(*__snake_case , **__snake_case )
def __magic_name__ ( self , *__snake_case , **__snake_case ) -> str:
'''simple docstring'''
return self.generator.decode(*__snake_case , **__snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.question_encoder
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.generator
def __magic_name__ ( self , __snake_case , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = "longest" , __snake_case = None , __snake_case = True , **__snake_case , ) -> BatchEncoding:
'''simple docstring'''
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' , __snake_case , )
if max_length is None:
__a =self.current_tokenizer.model_max_length
__a =self(
__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , max_length=__snake_case , padding=__snake_case , truncation=__snake_case , **__snake_case , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__a =self.current_tokenizer.model_max_length
__a =self(
text_target=__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , padding=__snake_case , max_length=__snake_case , truncation=__snake_case , **__snake_case , )
__a =labels['input_ids']
return model_inputs
| 308
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
_lowerCAmelCase : List[Any] = 256_047
_lowerCAmelCase : Dict = 256_145
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = NllbTokenizer
SCREAMING_SNAKE_CASE = NllbTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = {}
def __magic_name__ ( self ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
__a =tokenizer.tokenize('This is a test' )
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__a =tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a =tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
__a =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_seqaseq:
return
__a =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
__a =[
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
__a =[
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
__a =tokenizer.prepare_seqaseq_batch(
src_texts=__snake_case , tgt_texts=__snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
__a =tokenizer.prepare_seqaseq_batch(
__snake_case , tgt_texts=__snake_case , max_length=3 , return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
__a =tokenizer.prepare_seqaseq_batch(
src_texts=__snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('decoder_input_ids' , __snake_case )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =[AddedToken('<special>' , lstrip=__snake_case )]
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_r.encode('Hey this is a <special> token' )
__a =tokenizer_r.encode('<special>' , add_special_tokens=__snake_case )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
__a =self.tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_p.encode('Hey this is a <special> token' )
__a =tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = 'facebook/nllb-200-distilled-600M'
SCREAMING_SNAKE_CASE = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
SCREAMING_SNAKE_CASE = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def __magic_name__ ( cls ) -> Tuple:
'''simple docstring'''
__a =NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' )
__a =1
return cls
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 25_6057 )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
self.assertIn(__snake_case , self.tokenizer.all_special_ids )
# fmt: off
__a =[RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
__a =self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
__a =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertNotIn(self.tokenizer.eos_token , __snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , __snake_case )
__a =10
__a =self.tokenizer(__snake_case , max_length=__snake_case , truncation=__snake_case ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __snake_case )
self.assertEqual(len(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_6203, 3] )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =tempfile.mkdtemp()
__a =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__snake_case )
__a =NllbTokenizer.from_pretrained(__snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __snake_case )
@require_torch
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
__a =shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__a =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
self.assertEqual(__snake_case , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =self.tokenizer(self.src_text , padding=__snake_case , truncation=__snake_case , max_length=3 , return_tensors='pt' )
__a =self.tokenizer(
text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=10 , return_tensors='pt' )
__a =targets['input_ids']
__a =shift_tokens_right(
__snake_case , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(__snake_case ) , {
# A, test, EOS, en_XX
'input_ids': [[25_6047, 70, 7356, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_6057,
} , )
@require_torch
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =True
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
__a =False
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
| 308
| 1
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE = 'AutoImageProcessor'
SCREAMING_SNAKE_CASE = 'AutoTokenizer'
def __init__( self , __snake_case , __snake_case ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(__snake_case , __snake_case )
__a =self.image_processor
def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case ) -> int:
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
__a =self.tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case )
if images is not None:
__a =self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case )
if text is not None and images is not None:
__a =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__snake_case ) , tensor_type=__snake_case )
def __magic_name__ ( self , *__snake_case , **__snake_case ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def __magic_name__ ( self , *__snake_case , **__snake_case ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 308
|
def UpperCamelCase_( _snake_case : str , _snake_case : int ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(_snake_case ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 308
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : str = {"vocab_file": "sentencepiece.bpe.model"}
_lowerCAmelCase : Any = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
_lowerCAmelCase : str = {
"moussaKam/mbarthez": 1_024,
"moussaKam/barthez": 1_024,
"moussaKam/barthez-orangesum-title": 1_024,
}
_lowerCAmelCase : Optional[Any] = "▁"
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ) -> None:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
__a =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
__a ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
__a =vocab_file
__a =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__snake_case ) )
__a ={'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
__a =len(self.sp_model ) - 1
__a ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a =[self.cls_token_id]
__a =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __magic_name__ ( self , __snake_case , __snake_case = None , __snake_case = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is None:
return [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1, 1] + ([0] * len(__snake_case )) + [1]
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> List[int]:
'''simple docstring'''
__a =[self.sep_token_id]
__a =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __magic_name__ ( self ) -> str:
'''simple docstring'''
return len(self.sp_model )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a ={self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __magic_name__ ( self , __snake_case ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def __magic_name__ ( self , __snake_case ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a =self.sp_model.PieceToId(__snake_case )
return spm_id if spm_id else self.unk_token_id
def __magic_name__ ( self , __snake_case ) -> List[Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__snake_case )
def __magic_name__ ( self , __snake_case ) -> Dict:
'''simple docstring'''
__a =[]
__a =''
__a =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__snake_case ) + token
__a =True
__a =[]
else:
current_sub_tokens.append(__snake_case )
__a =False
out_string += self.sp_model.decode(__snake_case )
return out_string.strip()
def __getstate__( self ) -> Union[str, Any]:
'''simple docstring'''
__a =self.__dict__.copy()
__a =None
return state
def __setstate__( self , __snake_case ) -> str:
'''simple docstring'''
__a =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a ={}
__a =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__snake_case ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__a =os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case , 'wb' ) as fi:
__a =self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
| 308
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class __magic_name__ ( pl.LightningModule ):
def __init__( self , __snake_case ) -> List[Any]:
'''simple docstring'''
super().__init__()
__a =model
__a =2
__a =nn.Linear(self.model.config.hidden_size , self.num_labels )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCamelCase_( _snake_case : str , _snake_case : str , _snake_case : str ):
"""simple docstring"""
__a =LongformerModel.from_pretrained(_snake_case )
__a =LightningModel(_snake_case )
__a =torch.load(_snake_case , map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
__a =LongformerForQuestionAnswering.from_pretrained(_snake_case )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_snake_case )
print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 308
| 1
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class __magic_name__ :
def __init__( self , __snake_case , __snake_case , __snake_case = True , __snake_case = False ) -> Optional[int]:
'''simple docstring'''
__a =scheduler
__a =optimizers if isinstance(__snake_case , (list, tuple) ) else [optimizers]
__a =split_batches
__a =step_with_optimizer
__a =GradientState()
def __magic_name__ ( self , *__snake_case , **__snake_case ) -> Tuple:
'''simple docstring'''
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*__snake_case , **__snake_case )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*__snake_case , **__snake_case )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
__a =AcceleratorState().num_processes
for _ in range(__snake_case ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , 'total_steps' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*__snake_case , **__snake_case )
else:
self.scheduler.step(*__snake_case , **__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
return self.scheduler.get_last_lr()
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.scheduler.state_dict()
def __magic_name__ ( self , __snake_case ) -> List[Any]:
'''simple docstring'''
self.scheduler.load_state_dict(__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
return self.scheduler.get_lr()
def __magic_name__ ( self , *__snake_case , **__snake_case ) -> Optional[Any]:
'''simple docstring'''
return self.scheduler.print_lr(*__snake_case , **__snake_case )
| 308
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
SCREAMING_SNAKE_CASE = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
SCREAMING_SNAKE_CASE = field(
default=1_0_2_4 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = field(default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the test data.'} )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
__a =self.train_file.split('.' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__a =self.validation_file.split('.' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
SCREAMING_SNAKE_CASE = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def UpperCamelCase_( ):
"""simple docstring"""
__a =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__a , __a , __a =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__a , __a , __a =parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
__a =training_args.get_process_log_level()
logger.setLevel(_snake_case )
datasets.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__a =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__a =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__a =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__a ={'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__a =data_args.train_file.split('.' )[-1]
__a =data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__a =data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
__a =load_dataset('csv' , data_files=_snake_case , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__a =load_dataset('json' , data_files=_snake_case , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__a =raw_datasets['train'].features['label'].names
__a =len(_snake_case )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__a =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__a =TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_snake_case , )
__a =BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__a ='max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__a =False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__a ={'Refused': 0, 'Entailed': 1}
__a ={0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
__a =min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_snake_case : Tuple ):
# Tokenize the texts
def _convert_table_text_to_pandas(_snake_case : Optional[Any] ):
__a =[_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
__a =pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__a =examples['statement']
__a =list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
__a =tokenizer(_snake_case , _snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case )
__a =examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
__a =raw_datasets.map(
_snake_case , batched=_snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__a =raw_datasets['train']
if data_args.max_train_samples is not None:
__a =train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__a =raw_datasets['validation']
if data_args.max_eval_samples is not None:
__a =eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
__a =raw_datasets['test']
if data_args.max_predict_samples is not None:
__a =predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_snake_case ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_snake_case : EvalPrediction ):
__a =p.predictions[0] if isinstance(p.predictions , _snake_case ) else p.predictions
__a =np.argmax(_snake_case , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__a =default_data_collator
elif training_args.fpaa:
__a =DataCollatorWithPadding(_snake_case , pad_to_multiple_of=8 )
else:
__a =None
# Initialize our Trainer
__a =Trainer(
model=_snake_case , args=_snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_snake_case , tokenizer=_snake_case , data_collator=_snake_case , )
# Training
if training_args.do_train:
__a =None
if training_args.resume_from_checkpoint is not None:
__a =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a =last_checkpoint
__a =trainer.train(resume_from_checkpoint=_snake_case )
__a =train_result.metrics
__a =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(_snake_case )
)
__a =min(_snake_case , len(_snake_case ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , _snake_case )
trainer.save_metrics('train' , _snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__a =trainer.evaluate(eval_dataset=_snake_case )
__a =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_snake_case )
__a =min(_snake_case , len(_snake_case ) )
trainer.log_metrics('eval' , _snake_case )
trainer.save_metrics('eval' , _snake_case )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
__a =predict_dataset.remove_columns('label' )
__a =trainer.predict(_snake_case , metric_key_prefix='predict' ).predictions
__a =np.argmax(_snake_case , axis=1 )
__a =os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(_snake_case , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(_snake_case ):
__a =label_list[item]
writer.write(F'{index}\t{item}\n' )
__a ={'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**_snake_case )
else:
trainer.create_model_card(**_snake_case )
def UpperCamelCase_( _snake_case : Union[str, Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 308
| 1
|
def UpperCamelCase_( _snake_case : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
__a =set()
# Replace all the whitespace in our sentence
__a =input_str.replace(' ' , '' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(_snake_case ) == 26
def UpperCamelCase_( _snake_case : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
__a =[False] * 26
for char in input_str:
if char.islower():
__a =True
elif char.isupper():
__a =True
return all(_snake_case )
def UpperCamelCase_( _snake_case : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def UpperCamelCase_( ):
"""simple docstring"""
from timeit import timeit
__a ='from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
print(timeit('is_pangram()' , setup=_snake_case ) )
print(timeit('is_pangram_faster()' , setup=_snake_case ) )
print(timeit('is_pangram_fastest()' , setup=_snake_case ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 308
|
from __future__ import annotations
import time
import numpy as np
_lowerCAmelCase : List[str] = [8, 5, 9, 7]
_lowerCAmelCase : List[str] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_lowerCAmelCase : List[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __magic_name__ :
def __init__( self , __snake_case , __snake_case , __snake_case , ) -> None:
'''simple docstring'''
__a =claim_vector
__a =allocated_resources_table
__a =maximum_claim_table
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __magic_name__ ( self ) -> list[list[int]]:
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__snake_case ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __magic_name__ ( self ) -> dict[int, list[int]]:
'''simple docstring'''
return {self.__need().index(__snake_case ): i for i in self.__need()}
def __magic_name__ ( self , **__snake_case ) -> None:
'''simple docstring'''
__a =self.__need()
__a =self.__allocated_resources_table
__a =self.__available_resources()
__a =self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
__a =False
for each_need in need_list:
__a =True
for index, need in enumerate(__snake_case ):
if need > available_resources[index]:
__a =False
break
if execution:
__a =True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__a =original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(__snake_case )
# update available/freed resources stack
__a =np.array(__snake_case ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(__snake_case ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(__snake_case ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(__snake_case ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(__snake_case ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(__snake_case ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
| 1
|
def UpperCamelCase_( _snake_case : str , _snake_case : int ):
"""simple docstring"""
__a =word.split()
def justify(_snake_case : list , _snake_case : int , _snake_case : int ) -> str:
__a =max_width - width
__a =len(_snake_case )
if len(_snake_case ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
__a =words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
__a =spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
__a =(
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(_snake_case ):
num_spaces_between_words_list[i] += 1
__a =[]
for i in range(_snake_case ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(_snake_case )
__a =[]
__a =[]
__a =0
for word in words:
if width + len(_snake_case ) + len(_snake_case ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(_snake_case )
width += len(_snake_case )
else:
# justify the line and add it to result
answer.append(justify(_snake_case , _snake_case , _snake_case ) )
# reset new line and new width
__a , __a =[word], len(_snake_case )
__a =max_width - width - len(_snake_case )
answer.append(' '.join(_snake_case ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 308
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_lowerCAmelCase : Tuple = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
_lowerCAmelCase : Optional[int] = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
_lowerCAmelCase : Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a ={letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def UpperCamelCase_( _snake_case : tuple ):
"""simple docstring"""
return x[0]
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_letter_count(_snake_case )
__a ={
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(_snake_case )
__a ={}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_snake_case )
__a =''.join(freq_to_letter[freq] )
__a =list(freq_to_letter_str.items() )
freq_pairs.sort(key=_snake_case , reverse=_snake_case )
__a =[freq_pair[1] for freq_pair in freq_pairs]
return "".join(_snake_case )
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_frequency_order(_snake_case )
__a =0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
| 1
|
from math import ceil
def UpperCamelCase_( _snake_case : int , _snake_case : Any ):
"""simple docstring"""
__a =list(range(0 , _snake_case ) )
__a =[item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
__a =[]
for i in device_map_blocks:
if device_map_blocks.count(_snake_case ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(_snake_case )
# Missing blocks
__a =[i for i in blocks if i not in device_map_blocks]
__a =[i for i in device_map_blocks if i not in blocks]
if len(_snake_case ) != 0:
raise ValueError(
'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'
' These attention blocks were specified more than once: ' + str(_snake_case ) )
if len(_snake_case ) != 0:
raise ValueError(
'There are attention blocks for this model that are not specified in the device_map. Add these attention '
'blocks to a device on the device_map: ' + str(_snake_case ) )
if len(_snake_case ) != 0:
raise ValueError(
'The device_map contains more attention blocks than this model has. Remove these from the device_map:'
+ str(_snake_case ) )
def UpperCamelCase_( _snake_case : Optional[int] , _snake_case : Optional[int] ):
"""simple docstring"""
__a =list(range(_snake_case ) )
__a =int(ceil(n_layers / len(_snake_case ) ) )
__a =[layers[i : i + n_blocks] for i in range(0 , _snake_case , _snake_case )]
return dict(zip(_snake_case , _snake_case ) )
| 308
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : int = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'swin2sr'
SCREAMING_SNAKE_CASE = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __snake_case=64 , __snake_case=1 , __snake_case=3 , __snake_case=180 , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=8 , __snake_case=2.0 , __snake_case=True , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case="gelu" , __snake_case=False , __snake_case=0.02 , __snake_case=1e-5 , __snake_case=2 , __snake_case=1.0 , __snake_case="1conv" , __snake_case="pixelshuffle" , **__snake_case , ) -> Dict:
'''simple docstring'''
super().__init__(**__snake_case )
__a =image_size
__a =patch_size
__a =num_channels
__a =embed_dim
__a =depths
__a =len(__snake_case )
__a =num_heads
__a =window_size
__a =mlp_ratio
__a =qkv_bias
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =drop_path_rate
__a =hidden_act
__a =use_absolute_embeddings
__a =layer_norm_eps
__a =initializer_range
__a =upscale
__a =img_range
__a =resi_connection
__a =upsampler
| 308
| 1
|
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
def UpperCamelCase_( _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : Any=False ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
if not is_sharded:
__a =os.path.abspath(_snake_case )
logger.info(F'Loading PyTorch weights from {pt_path}' )
__a =torch.load(_snake_case , map_location='cpu' )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
__a =convert_pytorch_state_dict_to_flax(_snake_case , _snake_case )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
__a =convert_pytorch_sharded_state_dict_to_flax(_snake_case , _snake_case )
return flax_state_dict
def UpperCamelCase_( _snake_case : Tuple[str] , _snake_case : np.ndarray , _snake_case : Dict[str, jnp.ndarray] , _snake_case : str , ):
"""simple docstring"""
def is_key_or_prefix_key_in_dict(_snake_case : Tuple[str] ) -> bool:
return len(set(_snake_case ) & {key, (model_prefix,) + key} ) > 0
# layer norm
__a =pt_tuple_key[:-1] + ('scale',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_snake_case ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
__a =pt_tuple_key[:-1] + ('mean',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_snake_case ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
__a =pt_tuple_key[:-1] + ('var',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_snake_case ):
return renamed_pt_tuple_key, pt_tensor
# embedding
__a =pt_tuple_key[:-1] + ('embedding',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_snake_case ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
__a =pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_snake_case ):
__a =pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__a =pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_snake_case ):
__a =pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__a =pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__a =pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
__a =None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
__a =pt_tuple_key[-2] + '_g'
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
__a =pt_tuple_key[-2] + '_v'
if name is not None:
__a =pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCamelCase_( _snake_case : Dict , _snake_case : List[Any] ):
"""simple docstring"""
__a ={k: v.numpy() for k, v in pt_state_dict.items()}
__a =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
__a =flax_model.params['params']
else:
__a =flax_model.params
__a =flatten_dict(_snake_case )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__a =flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(_snake_case )
__a ={}
__a =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__a =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__a =tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__a =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__a =pt_tuple_key[1:]
# Correctly rename weight parameters
__a , __a =rename_key_and_reshape_tensor(
_snake_case , _snake_case , _snake_case , _snake_case )
# add model prefix if necessary
__a =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__a =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
__a =jnp.asarray(_snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_snake_case , _snake_case )
continue
# also add unexpected weight so that warning is thrown
__a =jnp.asarray(_snake_case )
else:
# also add unexpected weight so that warning is thrown
__a =jnp.asarray(_snake_case )
return unflatten_dict(_snake_case )
def UpperCamelCase_( _snake_case : int , _snake_case : str ):
"""simple docstring"""
import torch
# Load the index
__a ={}
for shard_file in shard_filenames:
# load using msgpack utils
__a =torch.load(_snake_case )
__a ={k: v.numpy() for k, v in pt_state_dict.items()}
__a =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__a =flax_model.params['params']
__a =flatten_dict(_snake_case )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
__a =flax_model.params
__a =flatten_dict(_snake_case )
__a =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__a =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__a =tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__a =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__a =pt_tuple_key[1:]
# Correctly rename weight parameters
__a , __a =rename_key_and_reshape_tensor(
_snake_case , _snake_case , _snake_case , _snake_case )
# add model prefix if necessary
__a =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__a =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__a =jnp.asarray(_snake_case )
continue
if "var" in flax_key[-1]:
__a =jnp.asarray(_snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_snake_case , _snake_case )
continue
# also add unexpected weight so that warning is thrown
__a =jnp.asarray(_snake_case )
else:
# also add unexpected weight so that warning is thrown
__a =jnp.asarray(_snake_case )
return unflatten_dict(_snake_case )
def UpperCamelCase_( _snake_case : Union[str, Any] , _snake_case : Optional[Any] ):
"""simple docstring"""
__a =os.path.abspath(_snake_case )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
__a =getattr(_snake_case , 'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(_snake_case , 'rb' ) as state_f:
try:
__a =from_bytes(_snake_case , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_snake_case , _snake_case )
def UpperCamelCase_( _snake_case : Tuple , _snake_case : Dict ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
__a =flatten_dict(jax.tree_util.tree_map(lambda _snake_case : x.dtype == jnp.bfloataa , _snake_case ) ).values()
if any(_snake_case ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
__a =jax.tree_util.tree_map(
lambda _snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _snake_case )
__a =flatten_dict(_snake_case )
__a =pt_model.state_dict()
__a =(pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
__a =(pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__a =[]
__a =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__a =flax_key_tuple[0] == pt_model.base_model_prefix
__a ='.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__a =flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__a =(pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_snake_case ) not in pt_model_dict:
# conv layer
__a =flax_key_tuple[:-1] + ('weight',)
__a =jnp.transpose(_snake_case , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_snake_case ) not in pt_model_dict:
# linear layer
__a =flax_key_tuple[:-1] + ('weight',)
__a =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__a =flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__a =flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
__a =flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
__a ='.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__a ='.'.join(_snake_case )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__a ={}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__a =key.split('.' )
__a =None
if key_components[-3::2] == ["parametrizations", "original0"]:
__a =key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
__a =key_components[-2] + '_v'
if name is not None:
__a =key_components[:-3] + [name]
__a ='.'.join(_snake_case )
__a =key
if flax_key in special_pt_names:
__a =special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
__a =np.asarray(_snake_case ) if not isinstance(_snake_case , np.ndarray ) else flax_tensor
__a =torch.from_numpy(_snake_case )
# remove from missing keys
missing_keys.remove(_snake_case )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_snake_case )
pt_model.load_state_dict(_snake_case )
# re-transform missing_keys to list
__a =list(_snake_case )
if len(_snake_case ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_snake_case ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
' use it for predictions and inference.' )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'If your task is similar to the task the model of the checkpoint was trained on, '
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 308
|
import os
def UpperCamelCase_( _snake_case : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_snake_case ) , _snake_case ) ) as input_file:
__a =[
[int(_snake_case ) for element in line.split(',' )]
for line in input_file.readlines()
]
__a =len(_snake_case )
__a =len(matrix[0] )
__a =[[-1 for _ in range(_snake_case )] for _ in range(_snake_case )]
for i in range(_snake_case ):
__a =matrix[i][0]
for j in range(1 , _snake_case ):
for i in range(_snake_case ):
__a =minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _snake_case ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline
SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__a =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__a =DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
torch.manual_seed(0 )
__a =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__a =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__a =CLIPTextModel(__snake_case )
__a =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__a ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __magic_name__ ( self , __snake_case , __snake_case=0 ) -> Union[str, Any]:
'''simple docstring'''
if str(__snake_case ).startswith('mps' ):
__a =torch.manual_seed(__snake_case )
else:
__a =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__a ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a ='cpu' # ensure determinism for the device-dependent torch.Generator
__a =self.get_dummy_components()
__a =StableDiffusionLDMaDPipeline(**__snake_case )
__a =ldmad_pipe.to(__snake_case )
ldmad_pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_dummy_inputs(__snake_case )
__a =ldmad_pipe(**__snake_case )
__a , __a =output.rgb, output.depth
__a =rgb[0, -3:, -3:, -1]
__a =depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
__a =np.array(
[0.3733_8176, 0.7_0247, 0.7420_3193, 0.5164_3604, 0.5825_6793, 0.6093_2136, 0.418_1095, 0.4835_5877, 0.4653_5262] )
__a =np.array([103.4_6727, 85.81_2004, 87.84_9236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.get_dummy_components()
__a =StableDiffusionLDMaDPipeline(**__snake_case )
__a =ldmad_pipe.to(__snake_case )
ldmad_pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_dummy_inputs(__snake_case )
__a =3 * [inputs['prompt']]
# forward
__a =ldmad_pipe(**__snake_case )
__a , __a =output.rgb, output.depth
__a =rgb_slice_a[0, -3:, -3:, -1]
__a =depth_slice_a[0, -3:, -1]
__a =self.get_dummy_inputs(__snake_case )
__a =3 * [inputs.pop('prompt' )]
__a =ldmad_pipe.tokenizer(
__snake_case , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , )
__a =text_inputs['input_ids'].to(__snake_case )
__a =ldmad_pipe.text_encoder(__snake_case )[0]
__a =prompt_embeds
# forward
__a =ldmad_pipe(**__snake_case )
__a , __a =output.rgb, output.depth
__a =rgb_slice_a[0, -3:, -3:, -1]
__a =depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a ='cpu' # ensure determinism for the device-dependent torch.Generator
__a =self.get_dummy_components()
__a =PNDMScheduler(skip_prk_steps=__snake_case )
__a =StableDiffusionLDMaDPipeline(**__snake_case )
__a =ldmad_pipe.to(__snake_case )
ldmad_pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_dummy_inputs(__snake_case )
__a ='french fries'
__a =ldmad_pipe(**__snake_case , negative_prompt=__snake_case )
__a , __a =output.rgb, output.depth
__a =rgb[0, -3:, -3:, -1]
__a =depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
__a =np.array(
[0.3_7044, 0.7181_1503, 0.722_3251, 0.4860_3675, 0.563_8391, 0.636_4948, 0.4283_3704, 0.490_1315, 0.4792_6217] )
__a =np.array([107.8_4738, 84.6_2802, 89.96_2135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self , __snake_case , __snake_case="cpu" , __snake_case=torch.floataa , __snake_case=0 ) -> Optional[int]:
'''simple docstring'''
__a =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__a =np.random.RandomState(__snake_case ).standard_normal((1, 4, 64, 64) )
__a =torch.from_numpy(__snake_case ).to(device=__snake_case , dtype=__snake_case )
__a ={
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
__a =ldmad_pipe.to(__snake_case )
ldmad_pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_inputs(__snake_case )
__a =ldmad_pipe(**__snake_case )
__a , __a =output.rgb, output.depth
__a =rgb[0, -3:, -3:, -1].flatten()
__a =rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
__a =np.array(
[0.5380_5465, 0.5670_7305, 0.548_6515, 0.5701_2236, 0.581_4511, 0.5625_3487, 0.5484_3014, 0.5509_2263, 0.645_9706] )
__a =np.array(
[0.926_3781, 0.667_8672, 0.548_6515, 0.9220_2145, 0.6783_1135, 0.5625_3487, 0.924_1694, 0.755_1478, 0.645_9706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self , __snake_case , __snake_case="cpu" , __snake_case=torch.floataa , __snake_case=0 ) -> Optional[int]:
'''simple docstring'''
__a =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__a =np.random.RandomState(__snake_case ).standard_normal((1, 4, 64, 64) )
__a =torch.from_numpy(__snake_case ).to(device=__snake_case , dtype=__snake_case )
__a ={
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(__snake_case )
ldmad_pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_inputs(__snake_case )
__a =ldmad_pipe(**__snake_case )
__a , __a =output.rgb, output.depth
__a =0.49_5586
__a =0.3379_5515
__a =112.4_8518
__a =98.48_9746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(__snake_case )
ldmad_pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_inputs(__snake_case )
__a =ldmad_pipe(**__snake_case )
__a , __a =output.rgb, output.depth
__a =0.419_4127
__a =0.3537_5586
__a =0.563_8502
__a =0.3468_6103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 308
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_lowerCAmelCase : Any = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , *__snake_case , **__snake_case ) -> None:
'''simple docstring'''
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 308
| 1
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def UpperCamelCase_( _snake_case : Optional[int] ):
"""simple docstring"""
def wrapper(*_snake_case : Union[str, Any] , **_snake_case : List[str] ):
__a =timeit.default_timer()
__a =func(*_snake_case , **_snake_case )
__a =timeit.default_timer() - starttime
return delta
__a =func.__name__
return wrapper
def UpperCamelCase_( _snake_case : dict , _snake_case : List[Any]=100 , _snake_case : Tuple=None ):
"""simple docstring"""
__a =[]
__a =seq_shapes or {}
for i in range(_snake_case ):
__a ={}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(_snake_case , _ArrayXD ):
__a =np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(_snake_case , datasets.Value ):
if v.dtype == "string":
__a ='The small grey turtle was surprisingly fast when challenged.'
else:
__a =np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(_snake_case , datasets.Sequence ):
while isinstance(_snake_case , datasets.Sequence ):
__a =v.feature
__a =seq_shapes[k]
__a =np.random.rand(*_snake_case ).astype(v.dtype )
__a =data
dummy_data.append((i, example) )
return dummy_data
def UpperCamelCase_( _snake_case : str , _snake_case : Any , _snake_case : List[str]=100 , _snake_case : Union[str, Any]=None ):
"""simple docstring"""
__a =generate_examples(_snake_case , num_examples=_snake_case , seq_shapes=_snake_case )
with ArrowWriter(features=_snake_case , path=_snake_case ) as writer:
for key, record in dummy_data:
__a =features.encode_example(_snake_case )
writer.write(_snake_case )
__a , __a =writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
__a =datasets.Dataset.from_file(filename=_snake_case , info=datasets.DatasetInfo(features=_snake_case ) )
return dataset
| 308
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_lowerCAmelCase : Any = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_snake_case )
def UpperCamelCase_( _snake_case : Optional[Any] ):
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
__a =terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(_snake_case , id=_snake_case )
| 308
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'yolos'
def __init__( self , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.02 , __snake_case=1e-12 , __snake_case=[512, 864] , __snake_case=16 , __snake_case=3 , __snake_case=True , __snake_case=100 , __snake_case=True , __snake_case=False , __snake_case=1 , __snake_case=5 , __snake_case=2 , __snake_case=5 , __snake_case=2 , __snake_case=0.1 , **__snake_case , ) -> str:
'''simple docstring'''
super().__init__(**__snake_case )
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =intermediate_size
__a =hidden_act
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =initializer_range
__a =layer_norm_eps
__a =image_size
__a =patch_size
__a =num_channels
__a =qkv_bias
__a =num_detection_tokens
__a =use_mid_position_embeddings
__a =auxiliary_loss
# Hungarian matcher
__a =class_cost
__a =bbox_cost
__a =giou_cost
# Loss coefficients
__a =bbox_loss_coefficient
__a =giou_loss_coefficient
__a =eos_coefficient
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = version.parse('1.11' )
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __magic_name__ ( self ) -> float:
'''simple docstring'''
return 1e-4
@property
def __magic_name__ ( self ) -> int:
'''simple docstring'''
return 12
| 308
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCAmelCase : str = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def UpperCamelCase_( _snake_case : Any , _snake_case : int , _snake_case : str ):
"""simple docstring"""
__a =state_dict.pop(_snake_case )
__a =val
def UpperCamelCase_( _snake_case : Dict ):
"""simple docstring"""
__a =OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__a =key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
__a =value
else:
__a =value
return new_state_dict
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__a =state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
__a =state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__a =in_proj_weight[:256, :]
__a =in_proj_bias[:256]
__a =in_proj_weight[256:512, :]
__a =in_proj_bias[256:512]
__a =in_proj_weight[-256:, :]
__a =in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__a =state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
__a =state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__a =in_proj_weight[:256, :]
__a =in_proj_bias[:256]
__a =in_proj_weight[256:512, :]
__a =in_proj_bias[256:512]
__a =in_proj_weight[-256:, :]
__a =in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
__a =state_dict.pop(
F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
__a =state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__a =in_proj_weight_cross_attn[:256, :]
__a =in_proj_bias_cross_attn[:256]
__a =in_proj_weight_cross_attn[256:512, :]
__a =in_proj_bias_cross_attn[256:512]
__a =in_proj_weight_cross_attn[-256:, :]
__a =in_proj_bias_cross_attn[-256:]
def UpperCamelCase_( _snake_case : Any , _snake_case : List[Any] ):
"""simple docstring"""
__a , __a =image.size
__a =max(_snake_case , _snake_case )
__a =800 if 'detection' in checkpoint_url else 1000
__a =target_max_size / current_max_size
__a =image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
__a =F.to_tensor(_snake_case )
__a =F.normalize(_snake_case , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def UpperCamelCase_( _snake_case : int , _snake_case : Tuple , _snake_case : int ):
"""simple docstring"""
logger.info('Converting model...' )
# load original state dict
__a =torch.hub.load_state_dict_from_url(_snake_case , map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(_snake_case , _snake_case , _snake_case )
__a =rename_backbone_keys(_snake_case )
# query, key and value matrices need special treatment
read_in_q_k_v(_snake_case )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__a ='model.'
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
__a =state_dict.pop(_snake_case )
__a =val
# create HuggingFace model and load state dict
__a =TableTransformerConfig(
backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
__a =15
__a =2
__a ={0: 'table', 1: 'table rotated'}
__a =idalabel
__a ={v: k for k, v in idalabel.items()}
else:
__a =125
__a =6
__a ={
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
__a =idalabel
__a ={v: k for k, v in idalabel.items()}
__a =DetrImageProcessor(
format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1000 )
__a =TableTransformerForObjectDetection(_snake_case )
model.load_state_dict(_snake_case )
model.eval()
# verify our conversion
__a ='example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
__a =hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=_snake_case )
__a =Image.open(_snake_case ).convert('RGB' )
__a =normalize(resize(_snake_case , _snake_case ) ).unsqueeze(0 )
__a =model(_snake_case )
if "detection" in checkpoint_url:
__a =(1, 15, 3)
__a =torch.tensor(
[[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
__a =torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
else:
__a =(1, 125, 7)
__a =torch.tensor(
[[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
__a =torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , _snake_case , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , _snake_case , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(_snake_case ).mkdir(exist_ok=_snake_case )
model.save_pretrained(_snake_case )
image_processor.save_pretrained(_snake_case )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
__a =(
'microsoft/table-transformer-detection'
if 'detection' in checkpoint_url
else 'microsoft/table-transformer-structure-recognition'
)
model.push_to_hub(_snake_case )
image_processor.push_to_hub(_snake_case )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 308
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def UpperCamelCase_( _snake_case : Optional[Any] ):
"""simple docstring"""
__a =model.config
__a =DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
__a =MBartConfig(
is_decoder=_snake_case , is_encoder_decoder=_snake_case , add_cross_attention=_snake_case , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=_snake_case , add_final_layer_norm=_snake_case , )
return encoder_config, decoder_config
def UpperCamelCase_( _snake_case : Tuple ):
"""simple docstring"""
if "encoder.model" in name:
__a =name.replace('encoder.model' , 'encoder' )
if "decoder.model" in name:
__a =name.replace('decoder.model' , 'decoder' )
if "patch_embed.proj" in name:
__a =name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__a =name.replace('patch_embed.norm' , 'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
__a ='encoder.' + name
if "attn.proj" in name:
__a =name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "mask" not in name:
__a =name.replace('attn' , 'attention.self' )
if "norm1" in name:
__a =name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__a =name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__a =name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__a =name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
__a ='encoder.layernorm.weight'
if name == "encoder.norm.bias":
__a ='encoder.layernorm.bias'
return name
def UpperCamelCase_( _snake_case : Tuple , _snake_case : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__a =orig_state_dict.pop(_snake_case )
if "qkv" in key:
__a =key.split('.' )
__a =int(key_split[3] )
__a =int(key_split[5] )
__a =model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__a =val[:dim, :]
__a =val[dim : dim * 2, :]
__a =val[-dim:, :]
else:
__a =val[:dim]
__a =val[dim : dim * 2]
__a =val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
__a =val
return orig_state_dict
def UpperCamelCase_( _snake_case : Tuple , _snake_case : Union[str, Any]=None , _snake_case : List[Any]=False ):
"""simple docstring"""
__a =DonutModel.from_pretrained(_snake_case ).eval()
# load HuggingFace model
__a , __a =get_configs(_snake_case )
__a =DonutSwinModel(_snake_case )
__a =MBartForCausalLM(_snake_case )
__a =VisionEncoderDecoderModel(encoder=_snake_case , decoder=_snake_case )
model.eval()
__a =original_model.state_dict()
__a =convert_state_dict(_snake_case , _snake_case )
model.load_state_dict(_snake_case )
# verify results on scanned document
__a =load_dataset('hf-internal-testing/example-documents' )
__a =dataset['test'][0]['image'].convert('RGB' )
__a =XLMRobertaTokenizerFast.from_pretrained(_snake_case , from_slow=_snake_case )
__a =DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
__a =DonutProcessor(_snake_case , _snake_case )
__a =processor(_snake_case , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
__a ='<s_docvqa><s_question>{user_input}</s_question><s_answer>'
__a ='When is the coffee break?'
__a =task_prompt.replace('{user_input}' , _snake_case )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
__a ='<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
__a ='<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
__a ='s_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
__a ='<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
__a ='hello world'
else:
raise ValueError('Model name not supported' )
__a =original_model.decoder.tokenizer(_snake_case , add_special_tokens=_snake_case , return_tensors='pt' )[
'input_ids'
]
__a =original_model.encoder.model.patch_embed(_snake_case )
__a , __a =model.encoder.embeddings(_snake_case )
assert torch.allclose(_snake_case , _snake_case , atol=1e-3 )
# verify encoder hidden states
__a =original_model.encoder(_snake_case )
__a =model.encoder(_snake_case ).last_hidden_state
assert torch.allclose(_snake_case , _snake_case , atol=1e-2 )
# verify decoder hidden states
__a =original_model(_snake_case , _snake_case , _snake_case ).logits
__a =model(_snake_case , decoder_input_ids=_snake_case ).logits
assert torch.allclose(_snake_case , _snake_case , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(_snake_case )
processor.save_pretrained(_snake_case )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 308
| 1
|
# using dfs for finding eulerian path traversal
def UpperCamelCase_( _snake_case : int , _snake_case : Tuple , _snake_case : Any , _snake_case : Optional[int]=None ):
"""simple docstring"""
__a =(path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
__a , __a =True, True
__a =dfs(_snake_case , _snake_case , _snake_case , _snake_case )
return path
def UpperCamelCase_( _snake_case : int , _snake_case : Optional[Any] ):
"""simple docstring"""
__a =0
__a =-1
for i in range(_snake_case ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
__a =i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def UpperCamelCase_( _snake_case : Optional[int] , _snake_case : Tuple ):
"""simple docstring"""
__a =[[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
__a , __a =check_circuit_or_path(_snake_case , _snake_case )
if check == 3:
print('graph is not Eulerian' )
print('no path' )
return
__a =1
if check == 2:
__a =odd_node
print('graph has a Euler path' )
if check == 1:
print('graph has a Euler cycle' )
__a =dfs(_snake_case , _snake_case , _snake_case )
print(_snake_case )
def UpperCamelCase_( ):
"""simple docstring"""
__a ={1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
__a ={1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
__a ={1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
__a ={1: [2, 3], 2: [1, 3], 3: [1, 2]}
__a ={
1: [],
2: []
# all degree is zero
}
__a =10
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
check_euler(_snake_case , _snake_case )
if __name__ == "__main__":
main()
| 308
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
SCREAMING_SNAKE_CASE = False
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
__a =['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
__a =dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__a =['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
__a ={'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__snake_case ) )
def __magic_name__ ( self , **__snake_case ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def __magic_name__ ( self , __snake_case ) -> List[Any]:
'''simple docstring'''
__a ='adapt act apte'
__a ='adapt act apte'
return input_text, output_text
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a ='adapt act apte'
__a =['adapt', 'act', 'ap@@', 'te']
__a =tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =[tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__a =[0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
__a ='I am a small frog.'
__a =tok([src_text] , padding=__snake_case , truncation=__snake_case )['input_ids']
__a =tok.batch_decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
__a ='I am a small frog .'
__a ='.'
__a =tok(__snake_case )['input_ids']
__a =tok(__snake_case )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 308
| 1
|
from __future__ import annotations
def UpperCamelCase_( _snake_case : int , _snake_case : int ):
"""simple docstring"""
if b == 0:
return (1, 0)
((__a) , (__a)) =extended_euclid(_snake_case , a % b )
__a =a // b
return (y, x - k * y)
def UpperCamelCase_( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int ):
"""simple docstring"""
((__a) , (__a)) =extended_euclid(_snake_case , _snake_case )
__a =na * na
__a =ra * x * na + ra * y * na
return (n % m + m) % m
def UpperCamelCase_( _snake_case : int , _snake_case : int ):
"""simple docstring"""
((__a) , (__a)) =extended_euclid(_snake_case , _snake_case )
if b < 0:
__a =(b % n + n) % n
return b
def UpperCamelCase_( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int ):
"""simple docstring"""
__a , __a =invert_modulo(_snake_case , _snake_case ), invert_modulo(_snake_case , _snake_case )
__a =na * na
__a =ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 308
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __magic_name__ ( unittest.TestCase , lowerCAmelCase_ ):
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =load_tool('text-to-speech' )
self.tool.setup()
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
__a =self.tool('hey' )
__a =result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
__a =self.tool('hey' )
__a =result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
| 308
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Any = "▁"
_lowerCAmelCase : Optional[Any] = {"vocab_file": "spiece.model"}
_lowerCAmelCase : List[Any] = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
_lowerCAmelCase : Union[str, Any] = {
"google/pegasus-xsum": 512,
}
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__( self , __snake_case , __snake_case="<pad>" , __snake_case="</s>" , __snake_case="<unk>" , __snake_case="<mask_2>" , __snake_case="<mask_1>" , __snake_case=None , __snake_case=103 , __snake_case = None , **__snake_case , ) -> None:
'''simple docstring'''
__a =offset
if additional_special_tokens is not None:
if not isinstance(__snake_case , __snake_case ):
raise TypeError(
f'additional_special_tokens should be of type {type(__snake_case )}, but is'
f' {type(__snake_case )}' )
__a =(
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(__snake_case ) , self.offset - 1 )
]
if len(set(__snake_case ) ) != len(__snake_case ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
__a =additional_special_tokens_extended
else:
__a =[mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )]
__a ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__snake_case , unk_token=__snake_case , mask_token=__snake_case , pad_token=__snake_case , mask_token_sent=__snake_case , offset=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
__a =mask_token_sent
__a =vocab_file
__a =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__snake_case )
# add special tokens to encoder dict
__a ={
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
__a ={v: k for k, v in self.encoder.items()}
@property
def __magic_name__ ( self ) -> int:
'''simple docstring'''
return len(self.sp_model ) + self.offset
def __magic_name__ ( self ) -> Dict[str, int]:
'''simple docstring'''
__a ={self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.__dict__.copy()
__a =None
return state
def __setstate__( self , __snake_case ) -> str:
'''simple docstring'''
__a =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a ={}
__a =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __magic_name__ ( self , __snake_case ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def __magic_name__ ( self , __snake_case ) -> int:
'''simple docstring'''
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
__a =self.sp_model.piece_to_id(__snake_case )
return sp_id + self.offset
def __magic_name__ ( self , __snake_case ) -> str:
'''simple docstring'''
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
__a =self.sp_model.IdToPiece(index - self.offset )
return token
def __magic_name__ ( self , __snake_case ) -> Any:
'''simple docstring'''
__a =[]
__a =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__snake_case ) + token
__a =[]
else:
current_sub_tokens.append(__snake_case )
out_string += self.sp_model.decode(__snake_case )
return out_string.strip()
def __magic_name__ ( self , __snake_case=False ) -> Union[str, Any]:
'''simple docstring'''
return 1
def __magic_name__ ( self , __snake_case ) -> Optional[int]:
'''simple docstring'''
__a =set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def __magic_name__ ( self , __snake_case , __snake_case = None , __snake_case = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(__snake_case )
elif token_ids_a is None:
return self._special_token_mask(__snake_case ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __magic_name__ ( self , __snake_case , __snake_case=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__snake_case ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__a =os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case , 'wb' ) as fi:
__a =self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
| 308
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> Optional[Any]:
'''simple docstring'''
__a =()
for resnet, attn in zip(self.resnets , self.attentions ):
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case=True ) -> Optional[int]:
'''simple docstring'''
__a =()
for resnet in self.resnets:
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# there is always at least one resnet
__a =[
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__a =[]
for _ in range(self.num_layers ):
__a =FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
__a =attentions
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[str]:
'''simple docstring'''
__a =self.resnets[0](__snake_case , __snake_case )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
return hidden_states
| 308
| 1
|
import argparse
from collections import defaultdict
import yaml
_lowerCAmelCase : Union[str, Any] = "docs/source/en/_toctree.yml"
def UpperCamelCase_( _snake_case : Dict ):
"""simple docstring"""
__a =defaultdict(_snake_case )
__a =[]
__a =[]
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(_snake_case )
__a =new_doc_list
__a =[key for key, value in counts.items() if value > 1]
__a =[]
for duplicate_key in duplicates:
__a =list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(_snake_case ) > 1:
raise ValueError(
F'{duplicate_key} is present several times in the documentation table of content at '
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
__a =sorted(_snake_case , key=lambda _snake_case : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(_snake_case ) > 1:
raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )
overview_doc.extend(_snake_case )
# Sort
return overview_doc
def UpperCamelCase_( _snake_case : int=False ):
"""simple docstring"""
with open(_snake_case , encoding='utf-8' ) as f:
__a =yaml.safe_load(f.read() )
# Get to the API doc
__a =0
while content[api_idx]["title"] != "API":
api_idx += 1
__a =content[api_idx]['sections']
# Then to the model doc
__a =0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
__a =api_doc[scheduler_idx]['sections']
__a =clean_doc_toc(_snake_case )
__a =False
if new_scheduler_doc != scheduler_doc:
__a =True
if overwrite:
__a =new_scheduler_doc
if diff:
if overwrite:
__a =api_doc
with open(_snake_case , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(_snake_case , allow_unicode=_snake_case ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def UpperCamelCase_( _snake_case : Optional[Any]=False ):
"""simple docstring"""
with open(_snake_case , encoding='utf-8' ) as f:
__a =yaml.safe_load(f.read() )
# Get to the API doc
__a =0
while content[api_idx]["title"] != "API":
api_idx += 1
__a =content[api_idx]['sections']
# Then to the model doc
__a =0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
__a =False
__a =api_doc[pipeline_idx]['sections']
__a =[]
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
__a =pipeline_doc['section']
__a =clean_doc_toc(_snake_case )
if overwrite:
__a =new_sub_pipeline_doc
new_pipeline_docs.append(_snake_case )
# sort overall pipeline doc
__a =clean_doc_toc(_snake_case )
if new_pipeline_docs != pipeline_docs:
__a =True
if overwrite:
__a =new_pipeline_docs
if diff:
if overwrite:
__a =api_doc
with open(_snake_case , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(_snake_case , allow_unicode=_snake_case ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_lowerCAmelCase : Any = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 308
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> int:
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=__snake_case , )
assert hasattr(self , 'env' )
def __magic_name__ ( self , __snake_case ) -> int:
'''simple docstring'''
__a =f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
__a ={'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__snake_case , instance_count=__snake_case , instance_type=self.instance_type , debugger_hook_config=__snake_case , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__snake_case , py_version='py36' , )
def __magic_name__ ( self , __snake_case ) -> Optional[Any]:
'''simple docstring'''
TrainingJobAnalytics(__snake_case ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def __magic_name__ ( self , __snake_case ) -> Optional[int]:
'''simple docstring'''
# create estimator
__a =self.create_estimator(__snake_case )
# run training
estimator.fit()
# result dataframe
__a =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__a =list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
__a =list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__a =(
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __snake_case )
| 308
| 1
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> Optional[Any]:
'''simple docstring'''
__a =()
for resnet, attn in zip(self.resnets , self.attentions ):
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case=True ) -> Optional[int]:
'''simple docstring'''
__a =()
for resnet in self.resnets:
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# there is always at least one resnet
__a =[
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__a =[]
for _ in range(self.num_layers ):
__a =FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
__a =attentions
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[str]:
'''simple docstring'''
__a =self.resnets[0](__snake_case , __snake_case )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
return hidden_states
| 308
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
_lowerCAmelCase : Optional[Any] = "Hello world! cécé herlolip"
_lowerCAmelCase : str = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def UpperCamelCase_( _snake_case : str , _snake_case : List[Any] ):
"""simple docstring"""
__a =BertAbsConfig(
temp_dir='.' , finetune_bert=_snake_case , large=_snake_case , share_emb=_snake_case , use_bert_emb=_snake_case , encoder='bert' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
__a =torch.load(_snake_case , lambda _snake_case , _snake_case : storage )
__a =AbsSummarizer(_snake_case , torch.device('cpu' ) , _snake_case )
original.eval()
__a =BertAbsSummarizer(_snake_case , torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
__a =BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
__a =tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
__a =tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__a =encoder_input_ids
__a =decoder_input_ids
__a =__a =None
__a =None
__a =__a =None
__a =__a =None
__a =None
# The original model does not apply the geneator layer immediatly but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__a =original(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =original.generator(_snake_case )
__a =new_model(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =new_model.generator(_snake_case )
__a =torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('Maximum absolute difference beween weights: {:.2f}'.format(_snake_case ) )
__a =torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('Maximum absolute difference beween weights: {:.2f}'.format(_snake_case ) )
__a =torch.allclose(_snake_case , _snake_case , atol=1e-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 308
| 1
|
def UpperCamelCase_( _snake_case : str , _snake_case : int ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(_snake_case ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 308
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=False , __snake_case=False , __snake_case=False , __snake_case=2 , __snake_case=99 , __snake_case=0 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=2 , __snake_case=0.02 , __snake_case=2 , __snake_case=4 , __snake_case="last" , __snake_case=True , __snake_case=None , __snake_case=0 , ) -> Optional[Any]:
'''simple docstring'''
__a =parent
__a =batch_size
__a =seq_length
__a =is_training
__a =use_input_lengths
__a =use_token_type_ids
__a =use_labels
__a =gelu_activation
__a =sinusoidal_embeddings
__a =causal
__a =asm
__a =n_langs
__a =vocab_size
__a =n_special
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =max_position_embeddings
__a =type_sequence_label_size
__a =initializer_range
__a =num_labels
__a =num_choices
__a =summary_type
__a =use_proj
__a =scope
__a =bos_token_id
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a =random_attention_mask([self.batch_size, self.seq_length] )
__a =None
if self.use_input_lengths:
__a =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__a =None
if self.use_token_type_ids:
__a =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__a =None
__a =None
__a =None
if self.use_labels:
__a =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a =ids_tensor([self.batch_size] , 2 ).float()
__a =ids_tensor([self.batch_size] , self.num_choices )
__a =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> List[Any]:
'''simple docstring'''
__a =XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , lengths=__snake_case , langs=__snake_case )
__a =model(__snake_case , langs=__snake_case )
__a =model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[int]:
'''simple docstring'''
__a =XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Dict:
'''simple docstring'''
__a =XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
__a =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> List[Any]:
'''simple docstring'''
__a =XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
__a =model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((__a) , ) =result_with_labels.to_tuple()
__a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((__a) , ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[Any]:
'''simple docstring'''
__a =XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Any:
'''simple docstring'''
__a =self.num_labels
__a =XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Tuple:
'''simple docstring'''
__a =self.num_choices
__a =XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) =config_and_inputs
__a ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case=False ) -> str:
'''simple docstring'''
__a =super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__a =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
__a =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =XLMModelTester(self )
__a =ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False , __snake_case=1 ) -> Optional[Any]:
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
__a =min_length + idx + 1
__a =min_length + idx + 1
__a =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False , __snake_case=1 ) -> Dict:
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
__a =min_length + idx + 1
__a =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
pass
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a =XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class __magic_name__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(__snake_case )
__a =torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
__a =[
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__a =model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
| 308
| 1
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
_lowerCAmelCase : Optional[Any] = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __magic_name__ ( unittest.TestCase , lowerCAmelCase_ ):
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =load_tool('text-question-answering' )
self.tool.setup()
__a =load_tool('text-question-answering' , remote=__snake_case )
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.tool(__snake_case , 'What did Hugging Face do in April 2021?' )
self.assertEqual(__snake_case , 'launched the BigScience Research Workshop' )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =self.remote_tool(__snake_case , 'What did Hugging Face do in April 2021?' )
self.assertEqual(__snake_case , 'launched the BigScience Research Workshop' )
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.tool(text=__snake_case , question='What did Hugging Face do in April 2021?' )
self.assertEqual(__snake_case , 'launched the BigScience Research Workshop' )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.remote_tool(text=__snake_case , question='What did Hugging Face do in April 2021?' )
self.assertEqual(__snake_case , 'launched the BigScience Research Workshop' )
| 308
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
_lowerCAmelCase : Optional[Any] = numpy.array([0, 0])
_lowerCAmelCase : Dict = numpy.array([0.5, 0.8660254])
_lowerCAmelCase : Any = numpy.array([1, 0])
_lowerCAmelCase : int = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCamelCase_( _snake_case : list[numpy.ndarray] , _snake_case : int ):
"""simple docstring"""
__a =initial_vectors
for _ in range(_snake_case ):
__a =iteration_step(_snake_case )
return vectors
def UpperCamelCase_( _snake_case : list[numpy.ndarray] ):
"""simple docstring"""
__a =[]
for i, start_vector in enumerate(vectors[:-1] ):
__a =vectors[i + 1]
new_vectors.append(_snake_case )
__a =end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def UpperCamelCase_( _snake_case : numpy.ndarray , _snake_case : float ):
"""simple docstring"""
__a =numpy.radians(_snake_case )
__a , __a =numpy.cos(_snake_case ), numpy.sin(_snake_case )
__a =numpy.array(((c, -s), (s, c)) )
return numpy.dot(_snake_case , _snake_case )
def UpperCamelCase_( _snake_case : list[numpy.ndarray] ):
"""simple docstring"""
__a =plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
__a , __a =zip(*_snake_case )
plt.plot(_snake_case , _snake_case )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : List[Any] = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 308
| 1
|
import requests
from bsa import BeautifulSoup
def UpperCamelCase_( _snake_case : str , _snake_case : dict ):
"""simple docstring"""
__a =BeautifulSoup(requests.get(_snake_case , params=_snake_case ).content , 'html.parser' )
__a =soup.find('div' , attrs={'class': 'gs_ri'} )
__a =div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
return anchors[2].get_text()
if __name__ == "__main__":
_lowerCAmelCase : Tuple = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2_018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 308
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Tuple = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
| 1
|
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
F'{test_file} instead.' )
__a =components[-1]
if not test_fn.endswith('py' ):
raise ValueError(F'`test_file` should be a python file. Got {test_fn} instead.' )
if not test_fn.startswith('test_modeling_' ):
raise ValueError(
F'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.' )
__a =components[:-1] + [test_fn.replace('.py' , '' )]
__a ='.'.join(_snake_case )
return test_module_path
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_module_path(_snake_case )
__a =importlib.import_module(_snake_case )
return test_module
def UpperCamelCase_( _snake_case : Union[str, Any] ):
"""simple docstring"""
__a =[]
__a =get_test_module(_snake_case )
for attr in dir(_snake_case ):
if attr.endswith('ModelTester' ):
tester_classes.append(getattr(_snake_case , _snake_case ) )
# sort with class names
return sorted(_snake_case , key=lambda _snake_case : x.__name__ )
def UpperCamelCase_( _snake_case : Dict ):
"""simple docstring"""
__a =[]
__a =get_test_module(_snake_case )
for attr in dir(_snake_case ):
__a =getattr(_snake_case , _snake_case )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
__a =getattr(_snake_case , 'all_model_classes' , [] )
if len(_snake_case ) > 0:
test_classes.append(_snake_case )
# sort with class names
return sorted(_snake_case , key=lambda _snake_case : x.__name__ )
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
__a =get_test_classes(_snake_case )
__a =set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(_snake_case , key=lambda _snake_case : x.__name__ )
def UpperCamelCase_( _snake_case : Union[str, Any] ):
"""simple docstring"""
__a =test_class()
if hasattr(_snake_case , 'setUp' ):
test.setUp()
__a =None
if hasattr(_snake_case , 'model_tester' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
__a =test.model_tester.__class__
return model_tester
def UpperCamelCase_( _snake_case : int , _snake_case : Tuple ):
"""simple docstring"""
__a =get_test_classes(_snake_case )
__a =[]
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(_snake_case )
# sort with class names
return sorted(_snake_case , key=lambda _snake_case : x.__name__ )
def UpperCamelCase_( _snake_case : List[str] , _snake_case : Optional[int] ):
"""simple docstring"""
__a =get_test_classes_for_model(_snake_case , _snake_case )
__a =[]
for test_class in test_classes:
__a =get_model_tester_from_test_class(_snake_case )
if tester_class is not None:
tester_classes.append(_snake_case )
# sort with class names
return sorted(_snake_case , key=lambda _snake_case : x.__name__ )
def UpperCamelCase_( _snake_case : Tuple ):
"""simple docstring"""
__a =get_test_classes(_snake_case )
__a ={test_class: get_model_tester_from_test_class(_snake_case ) for test_class in test_classes}
return test_tester_mapping
def UpperCamelCase_( _snake_case : Union[str, Any] ):
"""simple docstring"""
__a =get_model_classes(_snake_case )
__a ={
model_class: get_test_classes_for_model(_snake_case , _snake_case ) for model_class in model_classes
}
return model_test_mapping
def UpperCamelCase_( _snake_case : Dict ):
"""simple docstring"""
__a =get_model_classes(_snake_case )
__a ={
model_class: get_tester_classes_for_model(_snake_case , _snake_case ) for model_class in model_classes
}
return model_to_tester_mapping
def UpperCamelCase_( _snake_case : Optional[int] ):
"""simple docstring"""
if isinstance(_snake_case , _snake_case ):
return o
elif isinstance(_snake_case , _snake_case ):
return o.__name__
elif isinstance(_snake_case , (list, tuple) ):
return [to_json(_snake_case ) for x in o]
elif isinstance(_snake_case , _snake_case ):
return {to_json(_snake_case ): to_json(_snake_case ) for k, v in o.items()}
else:
return o
| 308
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
@staticmethod
def __magic_name__ ( *__snake_case , **__snake_case ) -> List[str]:
'''simple docstring'''
pass
def UpperCamelCase_( _snake_case : Image ):
"""simple docstring"""
__a =hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> Dict:
'''simple docstring'''
__a =DepthEstimationPipeline(model=__snake_case , image_processor=__snake_case )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __magic_name__ ( self , __snake_case , __snake_case ) -> Tuple:
'''simple docstring'''
__a =depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , __snake_case )
import datasets
__a =datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
__a =depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , __snake_case , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
@require_torch
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a ='Intel/dpt-large'
__a =pipeline('depth-estimation' , model=__snake_case )
__a =depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
__a =hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 308
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase : str = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Union[str, Any] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Union[str, Any] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
_lowerCAmelCase : Any = "pytorch_model.bin"
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The name of the task to train on.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Random seed for initialization.'} , )
def UpperCamelCase_( _snake_case : int , _snake_case : str , _snake_case : Optional[int] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : List[Any] ):
"""simple docstring"""
__a =datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
__a =dataset.filter(lambda _snake_case : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__a =int(eval_result * len(_snake_case ) )
print(_snake_case )
__a =dataset.sort('probability' , reverse=_snake_case )
__a =dataset.select(range(_snake_case ) )
__a =dataset.remove_columns(['label', 'probability'] )
__a =dataset.rename_column('prediction' , 'label' )
__a =dataset.map(lambda _snake_case : {"label": idalabel[example["label"]]} )
__a =dataset.shuffle(seed=args.seed )
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(_snake_case , index=_snake_case )
else:
dataset.to_json(_snake_case )
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : str , _snake_case : int , _snake_case : Optional[int] , **_snake_case : List[str] ):
"""simple docstring"""
__a =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__a =STModelArguments(model_name_or_path=_snake_case )
__a =STDataArguments(train_file=_snake_case , infer_file=_snake_case )
__a =STTrainingArguments(output_dir=_snake_case )
__a =argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_snake_case ).items():
setattr(_snake_case , _snake_case , _snake_case )
for key, value in kwargs.items():
if hasattr(_snake_case , _snake_case ):
setattr(_snake_case , _snake_case , _snake_case )
# Sanity checks
__a ={}
__a =None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__a =args.train_file
__a =args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__a =args.eval_file
for key in data_files:
__a =data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
__a =extension
else:
assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file`.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
__a =F'{args.output_dir}/self-train_iter-{{}}'.format
__a =data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_snake_case )
os.makedirs(_snake_case , exist_ok=_snake_case )
accelerator.wait_for_everyone()
__a =None
__a =None
__a =0
__a =False
# Show the progress bar
__a =tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__a =data_dir_format(_snake_case )
assert os.path.exists(_snake_case )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__a =os.path.join(_snake_case , 'stage-1' )
__a ={
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_snake_case , _snake_case ):
arguments_dict.update({key: value} )
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , _snake_case )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__a =os.path.join(_snake_case , 'best-checkpoint' )
__a =os.path.join(_snake_case , 'stage-2' )
# Update arguments_dict
__a =model_path
__a =data_files['train']
__a =current_output_dir
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , _snake_case )
__a =iteration
__a =data_dir_format(iteration + 1 )
__a =AutoConfig.from_pretrained(os.path.join(_snake_case , 'best-checkpoint' ) )
__a =config.idalabel
__a =os.path.join(_snake_case , 'eval_results_best-checkpoint.json' )
__a =os.path.join(_snake_case , 'test_results_best-checkpoint.json' )
assert os.path.exists(_snake_case )
with open(_snake_case , 'r' ) as f:
__a =float(json.load(_snake_case )[args.eval_metric] )
__a =os.path.join(_snake_case , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(_snake_case )
# Loading the dataset from local csv or json files.
__a =load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
__a =load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(_snake_case , exist_ok=_snake_case )
shutil.copy(_snake_case , os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(_snake_case ):
shutil.copy(_snake_case , os.path.join(_snake_case , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
accelerator.wait_for_everyone()
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__a =eval_result
if best_iteration is None:
__a =new_iteration
__a =new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__a =new_iteration
__a =new_eval_result
__a =0
else:
if new_eval_result == best_eval_result:
__a =new_iteration
__a =new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__a =True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , _snake_case )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
| 308
| 1
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
_lowerCAmelCase : int = logging.get_logger(__name__)
@dataclass
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self , **__snake_case ) -> Tuple:
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__a =deprecated_arg[3:]
setattr(self , __snake_case , not kwargs.pop(__snake_case ) )
logger.warning(
f'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}' )
__a =kwargs.pop('torchscript' , self.torchscript )
__a =kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics )
__a =kwargs.pop('fp16_opt_level' , self.fpaa_opt_level )
super().__init__(**__snake_case )
SCREAMING_SNAKE_CASE = field(default=lowerCAmelCase_ , metadata={'help': 'Trace the models using torchscript'} )
SCREAMING_SNAKE_CASE = field(default=lowerCAmelCase_ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
SCREAMING_SNAKE_CASE = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def __magic_name__ ( self ) -> Tuple["torch.device", int]:
'''simple docstring'''
requires_backends(self , ['torch'] )
logger.info('PyTorch: setting up devices' )
if not self.cuda:
__a =torch.device('cpu' )
__a =0
elif is_torch_tpu_available():
__a =xm.xla_device()
__a =0
else:
__a =torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
__a =torch.cuda.device_count()
return device, n_gpu
@property
def __magic_name__ ( self ) -> str:
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def __magic_name__ ( self ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def __magic_name__ ( self ) -> "torch.device":
'''simple docstring'''
requires_backends(self , ['torch'] )
return self._setup_devices[0]
@property
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
return self._setup_devices[1]
@property
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
return self.n_gpu > 0
| 308
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
_lowerCAmelCase : List[Any] = 256_047
_lowerCAmelCase : Dict = 256_145
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = NllbTokenizer
SCREAMING_SNAKE_CASE = NllbTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = {}
def __magic_name__ ( self ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
__a =tokenizer.tokenize('This is a test' )
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__a =tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a =tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
__a =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_seqaseq:
return
__a =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
__a =[
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
__a =[
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
__a =tokenizer.prepare_seqaseq_batch(
src_texts=__snake_case , tgt_texts=__snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
__a =tokenizer.prepare_seqaseq_batch(
__snake_case , tgt_texts=__snake_case , max_length=3 , return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
__a =tokenizer.prepare_seqaseq_batch(
src_texts=__snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('decoder_input_ids' , __snake_case )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =[AddedToken('<special>' , lstrip=__snake_case )]
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_r.encode('Hey this is a <special> token' )
__a =tokenizer_r.encode('<special>' , add_special_tokens=__snake_case )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
__a =self.tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_p.encode('Hey this is a <special> token' )
__a =tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = 'facebook/nllb-200-distilled-600M'
SCREAMING_SNAKE_CASE = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
SCREAMING_SNAKE_CASE = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def __magic_name__ ( cls ) -> Tuple:
'''simple docstring'''
__a =NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' )
__a =1
return cls
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 25_6057 )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
self.assertIn(__snake_case , self.tokenizer.all_special_ids )
# fmt: off
__a =[RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
__a =self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
__a =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertNotIn(self.tokenizer.eos_token , __snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , __snake_case )
__a =10
__a =self.tokenizer(__snake_case , max_length=__snake_case , truncation=__snake_case ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __snake_case )
self.assertEqual(len(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_6203, 3] )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =tempfile.mkdtemp()
__a =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__snake_case )
__a =NllbTokenizer.from_pretrained(__snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __snake_case )
@require_torch
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
__a =shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__a =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
self.assertEqual(__snake_case , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =self.tokenizer(self.src_text , padding=__snake_case , truncation=__snake_case , max_length=3 , return_tensors='pt' )
__a =self.tokenizer(
text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=10 , return_tensors='pt' )
__a =targets['input_ids']
__a =shift_tokens_right(
__snake_case , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(__snake_case ) , {
# A, test, EOS, en_XX
'input_ids': [[25_6047, 70, 7356, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_6057,
} , )
@require_torch
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =True
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
__a =False
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
| 308
| 1
|
from __future__ import annotations
import time
import numpy as np
_lowerCAmelCase : List[str] = [8, 5, 9, 7]
_lowerCAmelCase : List[str] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_lowerCAmelCase : List[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __magic_name__ :
def __init__( self , __snake_case , __snake_case , __snake_case , ) -> None:
'''simple docstring'''
__a =claim_vector
__a =allocated_resources_table
__a =maximum_claim_table
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __magic_name__ ( self ) -> list[list[int]]:
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__snake_case ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __magic_name__ ( self ) -> dict[int, list[int]]:
'''simple docstring'''
return {self.__need().index(__snake_case ): i for i in self.__need()}
def __magic_name__ ( self , **__snake_case ) -> None:
'''simple docstring'''
__a =self.__need()
__a =self.__allocated_resources_table
__a =self.__available_resources()
__a =self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
__a =False
for each_need in need_list:
__a =True
for index, need in enumerate(__snake_case ):
if need > available_resources[index]:
__a =False
break
if execution:
__a =True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__a =original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(__snake_case )
# update available/freed resources stack
__a =np.array(__snake_case ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(__snake_case ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(__snake_case ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(__snake_case ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(__snake_case ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(__snake_case ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
|
def UpperCamelCase_( _snake_case : str , _snake_case : int ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(_snake_case ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 308
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'ibert'
def __init__( self , __snake_case=3_0522 , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=2 , __snake_case=0.02 , __snake_case=1e-12 , __snake_case=1 , __snake_case=0 , __snake_case=2 , __snake_case="absolute" , __snake_case=False , __snake_case="none" , **__snake_case , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
__a =vocab_size
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =hidden_act
__a =intermediate_size
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =max_position_embeddings
__a =type_vocab_size
__a =initializer_range
__a =layer_norm_eps
__a =position_embedding_type
__a =quant_mode
__a =force_dequant
class __magic_name__ ( lowerCAmelCase_ ):
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__a ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__a ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 308
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class __magic_name__ ( pl.LightningModule ):
def __init__( self , __snake_case ) -> List[Any]:
'''simple docstring'''
super().__init__()
__a =model
__a =2
__a =nn.Linear(self.model.config.hidden_size , self.num_labels )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCamelCase_( _snake_case : str , _snake_case : str , _snake_case : str ):
"""simple docstring"""
__a =LongformerModel.from_pretrained(_snake_case )
__a =LightningModel(_snake_case )
__a =torch.load(_snake_case , map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
__a =LongformerForQuestionAnswering.from_pretrained(_snake_case )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_snake_case )
print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 308
| 1
|
def UpperCamelCase_( _snake_case : list ):
"""simple docstring"""
if len(_snake_case ) <= 1:
return lst
__a =1
while i < len(_snake_case ):
if lst[i - 1] <= lst[i]:
i += 1
else:
__a , __a =lst[i], lst[i - 1]
i -= 1
if i == 0:
__a =1
return lst
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = input("Enter numbers separated by a comma:\n").strip()
_lowerCAmelCase : Tuple = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 308
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
SCREAMING_SNAKE_CASE = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
SCREAMING_SNAKE_CASE = field(
default=1_0_2_4 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = field(default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the test data.'} )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
__a =self.train_file.split('.' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__a =self.validation_file.split('.' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
SCREAMING_SNAKE_CASE = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def UpperCamelCase_( ):
"""simple docstring"""
__a =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__a , __a , __a =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__a , __a , __a =parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
__a =training_args.get_process_log_level()
logger.setLevel(_snake_case )
datasets.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__a =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__a =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__a =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__a ={'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__a =data_args.train_file.split('.' )[-1]
__a =data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__a =data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
__a =load_dataset('csv' , data_files=_snake_case , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__a =load_dataset('json' , data_files=_snake_case , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__a =raw_datasets['train'].features['label'].names
__a =len(_snake_case )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__a =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__a =TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_snake_case , )
__a =BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__a ='max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__a =False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__a ={'Refused': 0, 'Entailed': 1}
__a ={0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
__a =min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_snake_case : Tuple ):
# Tokenize the texts
def _convert_table_text_to_pandas(_snake_case : Optional[Any] ):
__a =[_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
__a =pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__a =examples['statement']
__a =list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
__a =tokenizer(_snake_case , _snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case )
__a =examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
__a =raw_datasets.map(
_snake_case , batched=_snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__a =raw_datasets['train']
if data_args.max_train_samples is not None:
__a =train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__a =raw_datasets['validation']
if data_args.max_eval_samples is not None:
__a =eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
__a =raw_datasets['test']
if data_args.max_predict_samples is not None:
__a =predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_snake_case ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_snake_case : EvalPrediction ):
__a =p.predictions[0] if isinstance(p.predictions , _snake_case ) else p.predictions
__a =np.argmax(_snake_case , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__a =default_data_collator
elif training_args.fpaa:
__a =DataCollatorWithPadding(_snake_case , pad_to_multiple_of=8 )
else:
__a =None
# Initialize our Trainer
__a =Trainer(
model=_snake_case , args=_snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_snake_case , tokenizer=_snake_case , data_collator=_snake_case , )
# Training
if training_args.do_train:
__a =None
if training_args.resume_from_checkpoint is not None:
__a =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a =last_checkpoint
__a =trainer.train(resume_from_checkpoint=_snake_case )
__a =train_result.metrics
__a =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(_snake_case )
)
__a =min(_snake_case , len(_snake_case ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , _snake_case )
trainer.save_metrics('train' , _snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__a =trainer.evaluate(eval_dataset=_snake_case )
__a =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_snake_case )
__a =min(_snake_case , len(_snake_case ) )
trainer.log_metrics('eval' , _snake_case )
trainer.save_metrics('eval' , _snake_case )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
__a =predict_dataset.remove_columns('label' )
__a =trainer.predict(_snake_case , metric_key_prefix='predict' ).predictions
__a =np.argmax(_snake_case , axis=1 )
__a =os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(_snake_case , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(_snake_case ):
__a =label_list[item]
writer.write(F'{index}\t{item}\n' )
__a ={'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**_snake_case )
else:
trainer.create_model_card(**_snake_case )
def UpperCamelCase_( _snake_case : Union[str, Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 308
| 1
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase : List[str] = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'mask2former'
SCREAMING_SNAKE_CASE = ['swin']
SCREAMING_SNAKE_CASE = {'hidden_size': 'hidden_dim'}
def __init__( self , __snake_case = None , __snake_case = 256 , __snake_case = 256 , __snake_case = 256 , __snake_case = 1024 , __snake_case = "relu" , __snake_case = 6 , __snake_case = 10 , __snake_case = 8 , __snake_case = 0.0 , __snake_case = 2048 , __snake_case = False , __snake_case = False , __snake_case = 4 , __snake_case = 255 , __snake_case = 100 , __snake_case = 0.1 , __snake_case = 2.0 , __snake_case = 5.0 , __snake_case = 5.0 , __snake_case = 1_2544 , __snake_case = 3.0 , __snake_case = 0.75 , __snake_case = 0.02 , __snake_case = 1.0 , __snake_case = True , __snake_case = [4, 8, 16, 32] , __snake_case = None , **__snake_case , ) -> str:
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__a =CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__snake_case , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(__snake_case , __snake_case ):
__a =backbone_config.pop('model_type' )
__a =CONFIG_MAPPING[backbone_model_type]
__a =config_class.from_dict(__snake_case )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
f'Supported model types: {",".join(self.backbones_supported )}' )
__a =backbone_config
__a =feature_size
__a =mask_feature_size
__a =hidden_dim
__a =encoder_feedforward_dim
__a =activation_function
__a =encoder_layers
__a =decoder_layers
__a =num_attention_heads
__a =dropout
__a =dim_feedforward
__a =pre_norm
__a =enforce_input_projection
__a =common_stride
__a =ignore_value
__a =num_queries
__a =no_object_weight
__a =class_weight
__a =mask_weight
__a =dice_weight
__a =train_num_points
__a =oversample_ratio
__a =importance_sample_ratio
__a =init_std
__a =init_xavier_std
__a =use_auxiliary_loss
__a =feature_strides
__a =output_auxiliary_logits
__a =decoder_layers
super().__init__(**__snake_case )
@classmethod
def __magic_name__ ( cls , __snake_case , **__snake_case ) -> str:
'''simple docstring'''
return cls(
backbone_config=__snake_case , **__snake_case , )
def __magic_name__ ( self ) -> Dict[str, any]:
'''simple docstring'''
__a =copy.deepcopy(self.__dict__ )
__a =self.backbone_config.to_dict()
__a =self.__class__.model_type
return output
| 308
|
from __future__ import annotations
import time
import numpy as np
_lowerCAmelCase : List[str] = [8, 5, 9, 7]
_lowerCAmelCase : List[str] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_lowerCAmelCase : List[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __magic_name__ :
def __init__( self , __snake_case , __snake_case , __snake_case , ) -> None:
'''simple docstring'''
__a =claim_vector
__a =allocated_resources_table
__a =maximum_claim_table
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __magic_name__ ( self ) -> list[list[int]]:
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__snake_case ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __magic_name__ ( self ) -> dict[int, list[int]]:
'''simple docstring'''
return {self.__need().index(__snake_case ): i for i in self.__need()}
def __magic_name__ ( self , **__snake_case ) -> None:
'''simple docstring'''
__a =self.__need()
__a =self.__allocated_resources_table
__a =self.__available_resources()
__a =self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
__a =False
for each_need in need_list:
__a =True
for index, need in enumerate(__snake_case ):
if need > available_resources[index]:
__a =False
break
if execution:
__a =True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__a =original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(__snake_case )
# update available/freed resources stack
__a =np.array(__snake_case ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(__snake_case ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(__snake_case ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(__snake_case ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(__snake_case ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(__snake_case ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
| 1
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCamelCase_( ):
"""simple docstring"""
__a =ArgumentParser(
description=(
'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=_snake_case , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=_snake_case , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=_snake_case )
return parser.parse_args()
def UpperCamelCase_( ):
"""simple docstring"""
__a =parse_args()
# Import training_script as a module.
__a =Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__a =script_fpath.stem
__a =importlib.import_module(_snake_case )
# Patch sys.argv
__a =[args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 308
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_lowerCAmelCase : Tuple = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
_lowerCAmelCase : Optional[int] = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
_lowerCAmelCase : Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a ={letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def UpperCamelCase_( _snake_case : tuple ):
"""simple docstring"""
return x[0]
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_letter_count(_snake_case )
__a ={
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(_snake_case )
__a ={}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_snake_case )
__a =''.join(freq_to_letter[freq] )
__a =list(freq_to_letter_str.items() )
freq_pairs.sort(key=_snake_case , reverse=_snake_case )
__a =[freq_pair[1] for freq_pair in freq_pairs]
return "".join(_snake_case )
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_frequency_order(_snake_case )
__a =0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase : Any = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 308
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : int = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'swin2sr'
SCREAMING_SNAKE_CASE = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __snake_case=64 , __snake_case=1 , __snake_case=3 , __snake_case=180 , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=8 , __snake_case=2.0 , __snake_case=True , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case="gelu" , __snake_case=False , __snake_case=0.02 , __snake_case=1e-5 , __snake_case=2 , __snake_case=1.0 , __snake_case="1conv" , __snake_case="pixelshuffle" , **__snake_case , ) -> Dict:
'''simple docstring'''
super().__init__(**__snake_case )
__a =image_size
__a =patch_size
__a =num_channels
__a =embed_dim
__a =depths
__a =len(__snake_case )
__a =num_heads
__a =window_size
__a =mlp_ratio
__a =qkv_bias
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =drop_path_rate
__a =hidden_act
__a =use_absolute_embeddings
__a =layer_norm_eps
__a =initializer_range
__a =upscale
__a =img_range
__a =resi_connection
__a =upsampler
| 308
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCAmelCase : str = logging.get_logger(__name__)
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : Any ):
"""simple docstring"""
__a =b.T
__a =np.sum(np.square(_snake_case ) , axis=1 )
__a =np.sum(np.square(_snake_case ) , axis=0 )
__a =np.matmul(_snake_case , _snake_case )
__a =aa[:, None] - 2 * ab + ba[None, :]
return d
def UpperCamelCase_( _snake_case : str , _snake_case : Union[str, Any] ):
"""simple docstring"""
__a =x.reshape(-1 , 3 )
__a =squared_euclidean_distance(_snake_case , _snake_case )
return np.argmin(_snake_case , axis=1 )
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = ['pixel_values']
def __init__( self , __snake_case = None , __snake_case = True , __snake_case = None , __snake_case = PILImageResampling.BILINEAR , __snake_case = True , __snake_case = True , **__snake_case , ) -> None:
'''simple docstring'''
super().__init__(**__snake_case )
__a =size if size is not None else {'height': 256, 'width': 256}
__a =get_size_dict(__snake_case )
__a =np.array(__snake_case ) if clusters is not None else None
__a =do_resize
__a =size
__a =resample
__a =do_normalize
__a =do_color_quantize
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case = PILImageResampling.BILINEAR , __snake_case = None , **__snake_case , ) -> np.ndarray:
'''simple docstring'''
__a =get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
__snake_case , size=(size['height'], size['width']) , resample=__snake_case , data_format=__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case = None , ) -> np.ndarray:
'''simple docstring'''
__a =rescale(image=__snake_case , scale=1 / 127.5 , data_format=__snake_case )
__a =image - 1
return image
def __magic_name__ ( self , __snake_case , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = ChannelDimension.FIRST , **__snake_case , ) -> PIL.Image.Image:
'''simple docstring'''
__a =do_resize if do_resize is not None else self.do_resize
__a =size if size is not None else self.size
__a =get_size_dict(__snake_case )
__a =resample if resample is not None else self.resample
__a =do_normalize if do_normalize is not None else self.do_normalize
__a =do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__a =clusters if clusters is not None else self.clusters
__a =np.array(__snake_case )
__a =make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
__a =[to_numpy_array(__snake_case ) for image in images]
if do_resize:
__a =[self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images]
if do_normalize:
__a =[self.normalize(image=__snake_case ) for image in images]
if do_color_quantize:
__a =[to_channel_dimension_format(__snake_case , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__a =np.array(__snake_case )
__a =color_quantize(__snake_case , __snake_case ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__a =images.shape[0]
__a =images.reshape(__snake_case , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__a =list(__snake_case )
else:
__a =[to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
__a ={'input_ids': images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
| 308
|
import os
def UpperCamelCase_( _snake_case : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_snake_case ) , _snake_case ) ) as input_file:
__a =[
[int(_snake_case ) for element in line.split(',' )]
for line in input_file.readlines()
]
__a =len(_snake_case )
__a =len(matrix[0] )
__a =[[-1 for _ in range(_snake_case )] for _ in range(_snake_case )]
for i in range(_snake_case ):
__a =matrix[i][0]
for j in range(1 , _snake_case ):
for i in range(_snake_case ):
__a =minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _snake_case ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
| 1
|
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
_lowerCAmelCase : Union[str, Any] = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class __magic_name__ :
def __init__( self , __snake_case = 14 ) -> None:
'''simple docstring'''
if group not in primes:
raise ValueError('Unsupported Group' )
__a =primes[group]['prime']
__a =primes[group]['generator']
__a =int(hexlify(urandom(32 ) ) , base=16 )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
return hex(self.__private_key )[2:]
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =pow(self.generator , self.__private_key , self.prime )
return hex(__snake_case )[2:]
def __magic_name__ ( self , __snake_case ) -> bool:
'''simple docstring'''
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(__snake_case , (self.prime - 1) // 2 , self.prime ) == 1
)
def __magic_name__ ( self , __snake_case ) -> str:
'''simple docstring'''
__a =int(__snake_case , base=16 )
if not self.is_valid_public_key(__snake_case ):
raise ValueError('Invalid public key' )
__a =pow(__snake_case , self.__private_key , self.prime )
return shaaaa(str(__snake_case ).encode() ).hexdigest()
@staticmethod
def __magic_name__ ( __snake_case , __snake_case ) -> bool:
'''simple docstring'''
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(__snake_case , (prime - 1) // 2 , __snake_case ) == 1
)
@staticmethod
def __magic_name__ ( __snake_case , __snake_case , __snake_case = 14 ) -> str:
'''simple docstring'''
__a =int(__snake_case , base=16 )
__a =int(__snake_case , base=16 )
__a =primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(__snake_case , __snake_case ):
raise ValueError('Invalid public key' )
__a =pow(__snake_case , __snake_case , __snake_case )
return shaaaa(str(__snake_case ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_lowerCAmelCase : Any = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , *__snake_case , **__snake_case ) -> None:
'''simple docstring'''
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 308
| 1
|
from __future__ import annotations
def UpperCamelCase_( _snake_case : float , _snake_case : float , _snake_case : float , ):
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
| 1
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowerCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase : Union[str, Any] = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def UpperCamelCase_( _snake_case : Dict , _snake_case : int , _snake_case : List[Any]=8 ):
"""simple docstring"""
__a =height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__a =width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , __snake_case , __snake_case , __snake_case , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
self.register_modules(
unet=__snake_case , scheduler=__snake_case , movq=__snake_case , )
__a =2 ** (len(self.movq.config.block_out_channels ) - 1)
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> Optional[Any]:
'''simple docstring'''
if latents is None:
__a =randn_tensor(__snake_case , generator=__snake_case , device=__snake_case , dtype=__snake_case )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
__a =latents.to(__snake_case )
__a =latents * scheduler.init_noise_sigma
return latents
def __magic_name__ ( self , __snake_case=0 ) -> List[str]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__a =torch.device(f'cuda:{gpu_id}' )
__a =[
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__snake_case , __snake_case )
def __magic_name__ ( self , __snake_case=0 ) -> List[str]:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__a =torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=__snake_case )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__a =None
for cpu_offloaded_model in [self.unet, self.movq]:
__a , __a =cpu_offload_with_hook(__snake_case , __snake_case , prev_module_hook=__snake_case )
# We'll offload the last model manually.
__a =hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__snake_case , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__snake_case )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case = 512 , __snake_case = 512 , __snake_case = 100 , __snake_case = 4.0 , __snake_case = 1 , __snake_case = None , __snake_case = None , __snake_case = "pil" , __snake_case = True , ) -> str:
'''simple docstring'''
__a =self._execution_device
__a =guidance_scale > 1.0
if isinstance(__snake_case , __snake_case ):
__a =torch.cat(__snake_case , dim=0 )
if isinstance(__snake_case , __snake_case ):
__a =torch.cat(__snake_case , dim=0 )
if isinstance(__snake_case , __snake_case ):
__a =torch.cat(__snake_case , dim=0 )
__a =image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__a =image_embeds.repeat_interleave(__snake_case , dim=0 )
__a =negative_image_embeds.repeat_interleave(__snake_case , dim=0 )
__a =hint.repeat_interleave(__snake_case , dim=0 )
__a =torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__snake_case )
__a =torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__snake_case )
self.scheduler.set_timesteps(__snake_case , device=__snake_case )
__a =self.scheduler.timesteps
__a =self.movq.config.latent_channels
__a , __a =downscale_height_and_width(__snake_case , __snake_case , self.movq_scale_factor )
# create initial latent
__a =self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __snake_case , __snake_case , __snake_case , self.scheduler , )
for i, t in enumerate(self.progress_bar(__snake_case ) ):
# expand the latents if we are doing classifier free guidance
__a =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a ={'image_embeds': image_embeds, 'hint': hint}
__a =self.unet(
sample=__snake_case , timestep=__snake_case , encoder_hidden_states=__snake_case , added_cond_kwargs=__snake_case , return_dict=__snake_case , )[0]
if do_classifier_free_guidance:
__a , __a =noise_pred.split(latents.shape[1] , dim=1 )
__a , __a =noise_pred.chunk(2 )
__a , __a =variance_pred.chunk(2 )
__a =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__a =torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__a , __a =noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__a =self.scheduler.step(
__snake_case , __snake_case , __snake_case , generator=__snake_case , )[0]
# post-processing
__a =self.movq.decode(__snake_case , force_not_quantize=__snake_case )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
__a =image * 0.5 + 0.5
__a =image.clamp(0 , 1 )
__a =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__a =self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
| 308
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'yolos'
def __init__( self , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.02 , __snake_case=1e-12 , __snake_case=[512, 864] , __snake_case=16 , __snake_case=3 , __snake_case=True , __snake_case=100 , __snake_case=True , __snake_case=False , __snake_case=1 , __snake_case=5 , __snake_case=2 , __snake_case=5 , __snake_case=2 , __snake_case=0.1 , **__snake_case , ) -> str:
'''simple docstring'''
super().__init__(**__snake_case )
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =intermediate_size
__a =hidden_act
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =initializer_range
__a =layer_norm_eps
__a =image_size
__a =patch_size
__a =num_channels
__a =qkv_bias
__a =num_detection_tokens
__a =use_mid_position_embeddings
__a =auxiliary_loss
# Hungarian matcher
__a =class_cost
__a =bbox_cost
__a =giou_cost
# Loss coefficients
__a =bbox_loss_coefficient
__a =giou_loss_coefficient
__a =eos_coefficient
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = version.parse('1.11' )
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __magic_name__ ( self ) -> float:
'''simple docstring'''
return 1e-4
@property
def __magic_name__ ( self ) -> int:
'''simple docstring'''
return 12
| 308
| 1
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_lowerCAmelCase : int = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase_ )
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , **__snake_case ) -> Dict:
'''simple docstring'''
super().__init__(**__snake_case )
if self.framework != "pt":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
# No specific FOR_XXX available yet
def __call__( self , __snake_case , **__snake_case ) -> Union[str, Any]:
'''simple docstring'''
return super().__call__(__snake_case , **__snake_case )
def __magic_name__ ( self , **__snake_case ) -> str:
'''simple docstring'''
__a ={}
if "candidate_labels" in kwargs:
__a =kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
__a =kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __magic_name__ ( self , __snake_case , __snake_case=None , __snake_case="This is a sound of {}." ) -> Optional[int]:
'''simple docstring'''
if isinstance(__snake_case , __snake_case ):
if audio.startswith('http://' ) or audio.startswith('https://' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a =requests.get(__snake_case ).content
else:
with open(__snake_case , 'rb' ) as f:
__a =f.read()
if isinstance(__snake_case , __snake_case ):
__a =ffmpeg_read(__snake_case , self.feature_extractor.sampling_rate )
if not isinstance(__snake_case , np.ndarray ):
raise ValueError('We expect a numpy ndarray as input' )
if len(audio.shape ) != 1:
raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline' )
__a =self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='pt' )
__a =candidate_labels
__a =[hypothesis_template.format(__snake_case ) for x in candidate_labels]
__a =self.tokenizer(__snake_case , return_tensors=self.framework , padding=__snake_case )
__a =[text_inputs]
return inputs
def __magic_name__ ( self , __snake_case ) -> List[str]:
'''simple docstring'''
__a =model_inputs.pop('candidate_labels' )
__a =model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , __snake_case ):
__a =text_inputs[0]
else:
# Batching case.
__a =text_inputs[0][0]
__a =self.model(**__snake_case , **__snake_case )
__a ={
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_audio,
}
return model_outputs
def __magic_name__ ( self , __snake_case ) -> Optional[int]:
'''simple docstring'''
__a =model_outputs.pop('candidate_labels' )
__a =model_outputs['logits'][0]
if self.framework == "pt":
__a =logits.softmax(dim=0 )
__a =probs.tolist()
else:
raise ValueError('`tf` framework not supported.' )
__a =[
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(__snake_case , __snake_case ) , key=lambda __snake_case : -x[0] )
]
return result
| 308
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def UpperCamelCase_( _snake_case : Optional[Any] ):
"""simple docstring"""
__a =model.config
__a =DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
__a =MBartConfig(
is_decoder=_snake_case , is_encoder_decoder=_snake_case , add_cross_attention=_snake_case , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=_snake_case , add_final_layer_norm=_snake_case , )
return encoder_config, decoder_config
def UpperCamelCase_( _snake_case : Tuple ):
"""simple docstring"""
if "encoder.model" in name:
__a =name.replace('encoder.model' , 'encoder' )
if "decoder.model" in name:
__a =name.replace('decoder.model' , 'decoder' )
if "patch_embed.proj" in name:
__a =name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__a =name.replace('patch_embed.norm' , 'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
__a ='encoder.' + name
if "attn.proj" in name:
__a =name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "mask" not in name:
__a =name.replace('attn' , 'attention.self' )
if "norm1" in name:
__a =name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__a =name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__a =name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__a =name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
__a ='encoder.layernorm.weight'
if name == "encoder.norm.bias":
__a ='encoder.layernorm.bias'
return name
def UpperCamelCase_( _snake_case : Tuple , _snake_case : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__a =orig_state_dict.pop(_snake_case )
if "qkv" in key:
__a =key.split('.' )
__a =int(key_split[3] )
__a =int(key_split[5] )
__a =model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__a =val[:dim, :]
__a =val[dim : dim * 2, :]
__a =val[-dim:, :]
else:
__a =val[:dim]
__a =val[dim : dim * 2]
__a =val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
__a =val
return orig_state_dict
def UpperCamelCase_( _snake_case : Tuple , _snake_case : Union[str, Any]=None , _snake_case : List[Any]=False ):
"""simple docstring"""
__a =DonutModel.from_pretrained(_snake_case ).eval()
# load HuggingFace model
__a , __a =get_configs(_snake_case )
__a =DonutSwinModel(_snake_case )
__a =MBartForCausalLM(_snake_case )
__a =VisionEncoderDecoderModel(encoder=_snake_case , decoder=_snake_case )
model.eval()
__a =original_model.state_dict()
__a =convert_state_dict(_snake_case , _snake_case )
model.load_state_dict(_snake_case )
# verify results on scanned document
__a =load_dataset('hf-internal-testing/example-documents' )
__a =dataset['test'][0]['image'].convert('RGB' )
__a =XLMRobertaTokenizerFast.from_pretrained(_snake_case , from_slow=_snake_case )
__a =DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
__a =DonutProcessor(_snake_case , _snake_case )
__a =processor(_snake_case , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
__a ='<s_docvqa><s_question>{user_input}</s_question><s_answer>'
__a ='When is the coffee break?'
__a =task_prompt.replace('{user_input}' , _snake_case )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
__a ='<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
__a ='<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
__a ='s_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
__a ='<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
__a ='hello world'
else:
raise ValueError('Model name not supported' )
__a =original_model.decoder.tokenizer(_snake_case , add_special_tokens=_snake_case , return_tensors='pt' )[
'input_ids'
]
__a =original_model.encoder.model.patch_embed(_snake_case )
__a , __a =model.encoder.embeddings(_snake_case )
assert torch.allclose(_snake_case , _snake_case , atol=1e-3 )
# verify encoder hidden states
__a =original_model.encoder(_snake_case )
__a =model.encoder(_snake_case ).last_hidden_state
assert torch.allclose(_snake_case , _snake_case , atol=1e-2 )
# verify decoder hidden states
__a =original_model(_snake_case , _snake_case , _snake_case ).logits
__a =model(_snake_case , decoder_input_ids=_snake_case ).logits
assert torch.allclose(_snake_case , _snake_case , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(_snake_case )
processor.save_pretrained(_snake_case )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 308
| 1
|
from __future__ import annotations
from typing import Any
def UpperCamelCase_( _snake_case : list[Any] ):
"""simple docstring"""
create_state_space_tree(_snake_case , [] , 0 )
def UpperCamelCase_( _snake_case : list[Any] , _snake_case : list[Any] , _snake_case : int ):
"""simple docstring"""
if index == len(_snake_case ):
print(_snake_case )
return
create_state_space_tree(_snake_case , _snake_case , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(_snake_case , _snake_case , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
_lowerCAmelCase : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 308
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
SCREAMING_SNAKE_CASE = False
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
__a =['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
__a =dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__a =['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
__a ={'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__snake_case ) )
def __magic_name__ ( self , **__snake_case ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def __magic_name__ ( self , __snake_case ) -> List[Any]:
'''simple docstring'''
__a ='adapt act apte'
__a ='adapt act apte'
return input_text, output_text
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a ='adapt act apte'
__a =['adapt', 'act', 'ap@@', 'te']
__a =tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =[tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__a =[0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
__a ='I am a small frog.'
__a =tok([src_text] , padding=__snake_case , truncation=__snake_case )['input_ids']
__a =tok.batch_decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
__a ='I am a small frog .'
__a ='.'
__a =tok(__snake_case )['input_ids']
__a =tok(__snake_case )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 308
| 1
|
import os
from datetime import datetime as dt
from github import Github
_lowerCAmelCase : str = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def UpperCamelCase_( ):
"""simple docstring"""
__a =Github(os.environ['GITHUB_TOKEN'] )
__a =g.get_repo('huggingface/diffusers' )
__a =repo.get_issues(state='open' )
for issue in open_issues:
__a =sorted(issue.get_comments() , key=lambda _snake_case : i.created_at , reverse=_snake_case )
__a =comments[0] if len(_snake_case ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 308
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __magic_name__ ( unittest.TestCase , lowerCAmelCase_ ):
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =load_tool('text-to-speech' )
self.tool.setup()
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
__a =self.tool('hey' )
__a =result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
__a =self.tool('hey' )
__a =result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
| 308
| 1
|
_lowerCAmelCase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def UpperCamelCase_( ):
"""simple docstring"""
__a =input('Enter message: ' )
__a =input('Enter key [alphanumeric]: ' )
__a =input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
__a ='encrypt'
__a =encrypt_message(_snake_case , _snake_case )
elif mode.lower().startswith('d' ):
__a ='decrypt'
__a =decrypt_message(_snake_case , _snake_case )
print(F'\n{mode.title()}ed message:' )
print(_snake_case )
def UpperCamelCase_( _snake_case : str , _snake_case : str ):
"""simple docstring"""
return translate_message(_snake_case , _snake_case , 'encrypt' )
def UpperCamelCase_( _snake_case : str , _snake_case : str ):
"""simple docstring"""
return translate_message(_snake_case , _snake_case , 'decrypt' )
def UpperCamelCase_( _snake_case : str , _snake_case : str , _snake_case : str ):
"""simple docstring"""
__a =[]
__a =0
__a =key.upper()
for symbol in message:
__a =LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(_snake_case )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(_snake_case ):
__a =0
else:
translated.append(_snake_case )
return "".join(_snake_case )
if __name__ == "__main__":
main()
| 308
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> Optional[Any]:
'''simple docstring'''
__a =()
for resnet, attn in zip(self.resnets , self.attentions ):
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case=True ) -> Optional[int]:
'''simple docstring'''
__a =()
for resnet in self.resnets:
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# there is always at least one resnet
__a =[
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__a =[]
for _ in range(self.num_layers ):
__a =FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
__a =attentions
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[str]:
'''simple docstring'''
__a =self.resnets[0](__snake_case , __snake_case )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
return hidden_states
| 308
| 1
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {"vocab_file": "vocab.txt"}
_lowerCAmelCase : Any = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
_lowerCAmelCase : List[str] = {
"openbmb/cpm-ant-10b": 1_024,
}
def UpperCamelCase_( _snake_case : Dict ):
"""simple docstring"""
__a =collections.OrderedDict()
with open(_snake_case , 'r' , encoding='utf-8' ) as reader:
__a =reader.readlines()
for index, token in enumerate(_snake_case ):
__a =token.rstrip('\n' )
__a =index
return vocab
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , __snake_case , __snake_case="<unk>" , __snake_case=200 ) -> int:
'''simple docstring'''
__a =vocab
__a =unk_token
__a =max_input_chars_per_word
def __magic_name__ ( self , __snake_case ) -> Dict:
'''simple docstring'''
__a =list(__snake_case )
if len(__snake_case ) > self.max_input_chars_per_word:
return [self.unk_token]
__a =0
__a =[]
while start < len(__snake_case ):
__a =len(__snake_case )
__a =None
while start < end:
__a =''.join(chars[start:end] )
if substr in self.vocab:
__a =substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__snake_case )
__a =end
return sub_tokens
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE = False
def __init__( self , __snake_case , __snake_case="<d>" , __snake_case="</d>" , __snake_case="<s>" , __snake_case="</s>" , __snake_case="<pad>" , __snake_case="<unk>" , __snake_case="</n>" , __snake_case="</_>" , __snake_case="left" , **__snake_case , ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=__snake_case , eod_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , unk_token=__snake_case , line_token=__snake_case , space_token=__snake_case , padding_side=__snake_case , **__snake_case , )
__a =bod_token
__a =eod_token
__a =load_vocab(__snake_case )
__a =self.encoder[space_token]
__a =self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
__a =collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) )
__a ={v: k for k, v in self.encoder.items()}
__a =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.encoder["\n"]
@property
def __magic_name__ ( self ) -> int:
'''simple docstring'''
return len(self.encoder )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self , __snake_case ) -> Union[str, Any]:
'''simple docstring'''
__a =[]
for x in jieba.cut(__snake_case , cut_all=__snake_case ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__snake_case ) )
return output_tokens
def __magic_name__ ( self , __snake_case , **__snake_case ) -> str:
'''simple docstring'''
__a =[i for i in token_ids if i >= 0]
__a =[
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case ) -> Tuple:
'''simple docstring'''
return token in self.encoder
def __magic_name__ ( self , __snake_case ) -> str:
'''simple docstring'''
return "".join(__snake_case )
def __magic_name__ ( self , __snake_case ) -> str:
'''simple docstring'''
return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self , __snake_case ) -> List[Any]:
'''simple docstring'''
return self.decoder.get(__snake_case , self.unk_token )
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> Tuple[str]:
'''simple docstring'''
if os.path.isdir(__snake_case ):
__a =os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
__a =(filename_prefix + '-' if filename_prefix else '') + save_directory
__a =0
if " " in self.encoder:
__a =self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
__a =self.encoder['\n']
del self.encoder["\n"]
__a =collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) )
with open(__snake_case , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
__a =token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __magic_name__ ( self , __snake_case , __snake_case = None , __snake_case = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is not None:
return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case ))
return [1] + ([0] * len(__snake_case ))
| 308
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> int:
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=__snake_case , )
assert hasattr(self , 'env' )
def __magic_name__ ( self , __snake_case ) -> int:
'''simple docstring'''
__a =f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
__a ={'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__snake_case , instance_count=__snake_case , instance_type=self.instance_type , debugger_hook_config=__snake_case , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__snake_case , py_version='py36' , )
def __magic_name__ ( self , __snake_case ) -> Optional[Any]:
'''simple docstring'''
TrainingJobAnalytics(__snake_case ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def __magic_name__ ( self , __snake_case ) -> Optional[int]:
'''simple docstring'''
# create estimator
__a =self.create_estimator(__snake_case )
# run training
estimator.fit()
# result dataframe
__a =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__a =list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
__a =list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__a =(
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __snake_case )
| 308
| 1
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
@staticmethod
def __magic_name__ ( *__snake_case , **__snake_case ) -> List[str]:
'''simple docstring'''
pass
def UpperCamelCase_( _snake_case : Image ):
"""simple docstring"""
__a =hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> Dict:
'''simple docstring'''
__a =DepthEstimationPipeline(model=__snake_case , image_processor=__snake_case )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __magic_name__ ( self , __snake_case , __snake_case ) -> Tuple:
'''simple docstring'''
__a =depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , __snake_case )
import datasets
__a =datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
__a =depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , __snake_case , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
@require_torch
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a ='Intel/dpt-large'
__a =pipeline('depth-estimation' , model=__snake_case )
__a =depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
__a =hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 308
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
_lowerCAmelCase : Optional[Any] = "Hello world! cécé herlolip"
_lowerCAmelCase : str = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def UpperCamelCase_( _snake_case : str , _snake_case : List[Any] ):
"""simple docstring"""
__a =BertAbsConfig(
temp_dir='.' , finetune_bert=_snake_case , large=_snake_case , share_emb=_snake_case , use_bert_emb=_snake_case , encoder='bert' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
__a =torch.load(_snake_case , lambda _snake_case , _snake_case : storage )
__a =AbsSummarizer(_snake_case , torch.device('cpu' ) , _snake_case )
original.eval()
__a =BertAbsSummarizer(_snake_case , torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
__a =BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
__a =tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
__a =tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__a =encoder_input_ids
__a =decoder_input_ids
__a =__a =None
__a =None
__a =__a =None
__a =__a =None
__a =None
# The original model does not apply the geneator layer immediatly but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__a =original(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =original.generator(_snake_case )
__a =new_model(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =new_model.generator(_snake_case )
__a =torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('Maximum absolute difference beween weights: {:.2f}'.format(_snake_case ) )
__a =torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('Maximum absolute difference beween weights: {:.2f}'.format(_snake_case ) )
__a =torch.allclose(_snake_case , _snake_case , atol=1e-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 308
| 1
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def UpperCamelCase_( ):
"""simple docstring"""
__a =argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=_snake_case , default=_snake_case , required=_snake_case , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=_snake_case , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
'-n' , '--images_num' , type=_snake_case , default=4 , help='How much images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=_snake_case , default=42 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=_snake_case , default=0 , help='cuda_id.' , )
__a =parser.parse_args()
return args
def UpperCamelCase_( _snake_case : Any , _snake_case : Tuple , _snake_case : Optional[int] ):
"""simple docstring"""
if not len(_snake_case ) == rows * cols:
raise ValueError('The specified number of rows and columns are not correct.' )
__a , __a =imgs[0].size
__a =Image.new('RGB' , size=(cols * w, rows * h) )
__a , __a =grid.size
for i, img in enumerate(_snake_case ):
grid.paste(_snake_case , box=(i % cols * w, i // cols * h) )
return grid
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : Union[str, Any]="robotic cat with wings" , _snake_case : Union[str, Any]=7.5 , _snake_case : Any=50 , _snake_case : Any=1 , _snake_case : Dict=42 , ):
"""simple docstring"""
__a =torch.Generator(pipeline.device ).manual_seed(_snake_case )
__a =pipeline(
_snake_case , guidance_scale=_snake_case , num_inference_steps=_snake_case , generator=_snake_case , num_images_per_prompt=_snake_case , ).images
__a =int(math.sqrt(_snake_case ) )
__a =image_grid(_snake_case , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
_lowerCAmelCase : Any = parse_args()
# Load models and create wrapper for stable diffusion
_lowerCAmelCase : List[str] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
_lowerCAmelCase : Any = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
_lowerCAmelCase : Union[str, Any] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
_lowerCAmelCase : List[Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
_lowerCAmelCase : Any = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
_lowerCAmelCase : List[str] = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
_lowerCAmelCase : Optional[Any] = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, "unet", unet)
else:
_lowerCAmelCase : List[Any] = unet.to(torch.device("cuda", args.cuda_id))
_lowerCAmelCase : Optional[int] = pipeline.to(unet.device)
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
_lowerCAmelCase : Union[str, Any] = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 308
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=False , __snake_case=False , __snake_case=False , __snake_case=2 , __snake_case=99 , __snake_case=0 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=2 , __snake_case=0.02 , __snake_case=2 , __snake_case=4 , __snake_case="last" , __snake_case=True , __snake_case=None , __snake_case=0 , ) -> Optional[Any]:
'''simple docstring'''
__a =parent
__a =batch_size
__a =seq_length
__a =is_training
__a =use_input_lengths
__a =use_token_type_ids
__a =use_labels
__a =gelu_activation
__a =sinusoidal_embeddings
__a =causal
__a =asm
__a =n_langs
__a =vocab_size
__a =n_special
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =max_position_embeddings
__a =type_sequence_label_size
__a =initializer_range
__a =num_labels
__a =num_choices
__a =summary_type
__a =use_proj
__a =scope
__a =bos_token_id
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a =random_attention_mask([self.batch_size, self.seq_length] )
__a =None
if self.use_input_lengths:
__a =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__a =None
if self.use_token_type_ids:
__a =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__a =None
__a =None
__a =None
if self.use_labels:
__a =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a =ids_tensor([self.batch_size] , 2 ).float()
__a =ids_tensor([self.batch_size] , self.num_choices )
__a =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> List[Any]:
'''simple docstring'''
__a =XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , lengths=__snake_case , langs=__snake_case )
__a =model(__snake_case , langs=__snake_case )
__a =model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[int]:
'''simple docstring'''
__a =XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Dict:
'''simple docstring'''
__a =XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
__a =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> List[Any]:
'''simple docstring'''
__a =XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
__a =model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((__a) , ) =result_with_labels.to_tuple()
__a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((__a) , ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[Any]:
'''simple docstring'''
__a =XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Any:
'''simple docstring'''
__a =self.num_labels
__a =XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Tuple:
'''simple docstring'''
__a =self.num_choices
__a =XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) =config_and_inputs
__a ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case=False ) -> str:
'''simple docstring'''
__a =super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__a =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
__a =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =XLMModelTester(self )
__a =ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False , __snake_case=1 ) -> Optional[Any]:
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
__a =min_length + idx + 1
__a =min_length + idx + 1
__a =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False , __snake_case=1 ) -> Dict:
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
__a =min_length + idx + 1
__a =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
pass
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a =XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class __magic_name__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(__snake_case )
__a =torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
__a =[
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__a =model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
| 308
| 1
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
_lowerCAmelCase : List[Any] = None
try:
import msvcrt
except ImportError:
_lowerCAmelCase : Dict = None
try:
import fcntl
except ImportError:
_lowerCAmelCase : List[str] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
_lowerCAmelCase : Union[str, Any] = OSError
# Data
# ------------------------------------------------
_lowerCAmelCase : Any = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
_lowerCAmelCase : Optional[Any] = "3.0.12"
_lowerCAmelCase : Dict = None
def UpperCamelCase_( ):
"""simple docstring"""
global _logger
__a =_logger or logging.getLogger(__name__ )
return _logger
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , __snake_case ) -> str:
'''simple docstring'''
__a =lock_file
return None
def __str__( self ) -> List[str]:
'''simple docstring'''
__a =f'The file lock \'{self.lock_file}\' could not be acquired.'
return temp
class __magic_name__ :
def __init__( self , __snake_case ) -> List[str]:
'''simple docstring'''
__a =lock
return None
def __enter__( self ) -> str:
'''simple docstring'''
return self.lock
def __exit__( self , __snake_case , __snake_case , __snake_case ) -> Union[str, Any]:
'''simple docstring'''
self.lock.release()
return None
class __magic_name__ :
def __init__( self , __snake_case , __snake_case=-1 , __snake_case=None ) -> Dict:
'''simple docstring'''
__a =max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__a =self.hash_filename_if_too_long(__snake_case , __snake_case )
# The path to the lock file.
__a =lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__a =None
# The default timeout value.
__a =timeout
# We use this lock primarily for the lock counter.
__a =threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__a =0
return None
@property
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self._lock_file
@property
def __magic_name__ ( self ) -> int:
'''simple docstring'''
return self._timeout
@timeout.setter
def __magic_name__ ( self , __snake_case ) -> str:
'''simple docstring'''
__a =float(__snake_case )
return None
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
raise NotImplementedError()
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
raise NotImplementedError()
@property
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self._lock_file_fd is not None
def __magic_name__ ( self , __snake_case=None , __snake_case=0.05 ) -> Dict:
'''simple docstring'''
# Use the default timeout, if no timeout is provided.
if timeout is None:
__a =self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__a =id(self )
__a =self._lock_file
__a =time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}' )
self._acquire()
if self.is_locked:
logger().debug(f'Lock {lock_id} acquired on {lock_filename}' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' )
time.sleep(__snake_case )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__a =max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def __magic_name__ ( self , __snake_case=False ) -> Optional[Any]:
'''simple docstring'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__a =id(self )
__a =self._lock_file
logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}' )
self._release()
__a =0
logger().debug(f'Lock {lock_id} released on {lock_filename}' )
return None
def __enter__( self ) -> Tuple:
'''simple docstring'''
self.acquire()
return self
def __exit__( self , __snake_case , __snake_case , __snake_case ) -> Any:
'''simple docstring'''
self.release()
return None
def __del__( self ) -> List[str]:
'''simple docstring'''
self.release(force=__snake_case )
return None
def __magic_name__ ( self , __snake_case , __snake_case ) -> str:
'''simple docstring'''
__a =os.path.basename(__snake_case )
if len(__snake_case ) > max_length and max_length > 0:
__a =os.path.dirname(__snake_case )
__a =str(hash(__snake_case ) )
__a =filename[: max_length - len(__snake_case ) - 8] + '...' + hashed_filename + '.lock'
return os.path.join(__snake_case , __snake_case )
else:
return path
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , __snake_case , __snake_case=-1 , __snake_case=None ) -> Tuple:
'''simple docstring'''
from .file_utils import relative_to_absolute_path
super().__init__(__snake_case , timeout=__snake_case , max_filename_length=__snake_case )
__a ='\\\\?\\' + relative_to_absolute_path(self.lock_file )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__a =os.open(self._lock_file , __snake_case )
except OSError:
pass
else:
try:
msvcrt.locking(__snake_case , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__snake_case )
else:
__a =fd
return None
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =self._lock_file_fd
__a =None
msvcrt.locking(__snake_case , msvcrt.LK_UNLCK , 1 )
os.close(__snake_case )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , __snake_case , __snake_case=-1 , __snake_case=None ) -> Optional[int]:
'''simple docstring'''
__a =os.statvfs(os.path.dirname(__snake_case ) ).f_namemax
super().__init__(__snake_case , timeout=__snake_case , max_filename_length=__snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =os.O_RDWR | os.O_CREAT | os.O_TRUNC
__a =os.open(self._lock_file , __snake_case )
try:
fcntl.flock(__snake_case , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__snake_case )
else:
__a =fd
return None
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
__a =self._lock_file_fd
__a =None
fcntl.flock(__snake_case , fcntl.LOCK_UN )
os.close(__snake_case )
return None
class __magic_name__ ( lowerCAmelCase_ ):
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__a =os.open(self._lock_file , __snake_case )
except OSError:
pass
else:
__a =fd
return None
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
os.close(self._lock_file_fd )
__a =None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
_lowerCAmelCase : Any = None
if msvcrt:
_lowerCAmelCase : List[Any] = WindowsFileLock
elif fcntl:
_lowerCAmelCase : Optional[Any] = UnixFileLock
else:
_lowerCAmelCase : List[str] = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
| 308
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
_lowerCAmelCase : Optional[Any] = numpy.array([0, 0])
_lowerCAmelCase : Dict = numpy.array([0.5, 0.8660254])
_lowerCAmelCase : Any = numpy.array([1, 0])
_lowerCAmelCase : int = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCamelCase_( _snake_case : list[numpy.ndarray] , _snake_case : int ):
"""simple docstring"""
__a =initial_vectors
for _ in range(_snake_case ):
__a =iteration_step(_snake_case )
return vectors
def UpperCamelCase_( _snake_case : list[numpy.ndarray] ):
"""simple docstring"""
__a =[]
for i, start_vector in enumerate(vectors[:-1] ):
__a =vectors[i + 1]
new_vectors.append(_snake_case )
__a =end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def UpperCamelCase_( _snake_case : numpy.ndarray , _snake_case : float ):
"""simple docstring"""
__a =numpy.radians(_snake_case )
__a , __a =numpy.cos(_snake_case ), numpy.sin(_snake_case )
__a =numpy.array(((c, -s), (s, c)) )
return numpy.dot(_snake_case , _snake_case )
def UpperCamelCase_( _snake_case : list[numpy.ndarray] ):
"""simple docstring"""
__a =plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
__a , __a =zip(*_snake_case )
plt.plot(_snake_case , _snake_case )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : List[Any] = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 308
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Union[str, Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Union[str, Any] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Tuple = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
| 1
|
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class __magic_name__ ( unittest.TestCase ):
def __init__( self , __snake_case ) -> Any:
'''simple docstring'''
__a =parent
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return {}
def UpperCamelCase_( ):
"""simple docstring"""
__a ='<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
__a ='\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
return [html_string_a, html_string_a]
@require_bsa
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = MarkupLMFeatureExtractor if is_bsa_available() else None
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =MarkupLMFeatureExtractionTester(self )
@property
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
return self.feature_extract_tester.prepare_feat_extract_dict()
def __magic_name__ ( self ) -> int:
'''simple docstring'''
# Initialize feature_extractor
__a =self.feature_extraction_class()
# Test not batched input
__a =get_html_strings()[0]
__a =feature_extractor(__snake_case )
# fmt: off
__a =[['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
__a =[['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes , __snake_case )
self.assertEqual(encoding.xpaths , __snake_case )
# Test batched
__a =get_html_strings()
__a =feature_extractor(__snake_case )
# fmt: off
__a =expected_nodes + [['My First Heading', 'My first paragraph.']]
__a =expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , __snake_case )
self.assertEqual(encoding.xpaths , __snake_case )
| 308
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
@staticmethod
def __magic_name__ ( *__snake_case , **__snake_case ) -> List[str]:
'''simple docstring'''
pass
def UpperCamelCase_( _snake_case : Image ):
"""simple docstring"""
__a =hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> Dict:
'''simple docstring'''
__a =DepthEstimationPipeline(model=__snake_case , image_processor=__snake_case )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __magic_name__ ( self , __snake_case , __snake_case ) -> Tuple:
'''simple docstring'''
__a =depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , __snake_case )
import datasets
__a =datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
__a =depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , __snake_case , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
@require_torch
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a ='Intel/dpt-large'
__a =pipeline('depth-estimation' , model=__snake_case )
__a =depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
__a =hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 308
| 1
|
from __future__ import annotations
import math
from collections.abc import Callable
def UpperCamelCase_( _snake_case : Callable[[int | float], int | float] , _snake_case : int | float , _snake_case : int | float , _snake_case : int = 100 , ):
"""simple docstring"""
__a =x_start
__a =fnc(_snake_case )
__a =0.0
for _ in range(_snake_case ):
# Approximates curve as a sequence of linear lines and sums their length
__a =(x_end - x_start) / steps + xa
__a =fnc(_snake_case )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
__a =xa
__a =fxa
return length
if __name__ == "__main__":
def UpperCamelCase_( _snake_case : Dict ):
"""simple docstring"""
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
_lowerCAmelCase : Union[str, Any] = 10
while i <= 100_000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 308
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
_lowerCAmelCase : Any = "pytorch_model.bin"
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The name of the task to train on.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Random seed for initialization.'} , )
def UpperCamelCase_( _snake_case : int , _snake_case : str , _snake_case : Optional[int] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : List[Any] ):
"""simple docstring"""
__a =datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
__a =dataset.filter(lambda _snake_case : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__a =int(eval_result * len(_snake_case ) )
print(_snake_case )
__a =dataset.sort('probability' , reverse=_snake_case )
__a =dataset.select(range(_snake_case ) )
__a =dataset.remove_columns(['label', 'probability'] )
__a =dataset.rename_column('prediction' , 'label' )
__a =dataset.map(lambda _snake_case : {"label": idalabel[example["label"]]} )
__a =dataset.shuffle(seed=args.seed )
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(_snake_case , index=_snake_case )
else:
dataset.to_json(_snake_case )
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : str , _snake_case : int , _snake_case : Optional[int] , **_snake_case : List[str] ):
"""simple docstring"""
__a =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__a =STModelArguments(model_name_or_path=_snake_case )
__a =STDataArguments(train_file=_snake_case , infer_file=_snake_case )
__a =STTrainingArguments(output_dir=_snake_case )
__a =argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_snake_case ).items():
setattr(_snake_case , _snake_case , _snake_case )
for key, value in kwargs.items():
if hasattr(_snake_case , _snake_case ):
setattr(_snake_case , _snake_case , _snake_case )
# Sanity checks
__a ={}
__a =None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__a =args.train_file
__a =args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__a =args.eval_file
for key in data_files:
__a =data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
__a =extension
else:
assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file`.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
__a =F'{args.output_dir}/self-train_iter-{{}}'.format
__a =data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_snake_case )
os.makedirs(_snake_case , exist_ok=_snake_case )
accelerator.wait_for_everyone()
__a =None
__a =None
__a =0
__a =False
# Show the progress bar
__a =tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__a =data_dir_format(_snake_case )
assert os.path.exists(_snake_case )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__a =os.path.join(_snake_case , 'stage-1' )
__a ={
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_snake_case , _snake_case ):
arguments_dict.update({key: value} )
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , _snake_case )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__a =os.path.join(_snake_case , 'best-checkpoint' )
__a =os.path.join(_snake_case , 'stage-2' )
# Update arguments_dict
__a =model_path
__a =data_files['train']
__a =current_output_dir
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , _snake_case )
__a =iteration
__a =data_dir_format(iteration + 1 )
__a =AutoConfig.from_pretrained(os.path.join(_snake_case , 'best-checkpoint' ) )
__a =config.idalabel
__a =os.path.join(_snake_case , 'eval_results_best-checkpoint.json' )
__a =os.path.join(_snake_case , 'test_results_best-checkpoint.json' )
assert os.path.exists(_snake_case )
with open(_snake_case , 'r' ) as f:
__a =float(json.load(_snake_case )[args.eval_metric] )
__a =os.path.join(_snake_case , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(_snake_case )
# Loading the dataset from local csv or json files.
__a =load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
__a =load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(_snake_case , exist_ok=_snake_case )
shutil.copy(_snake_case , os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(_snake_case ):
shutil.copy(_snake_case , os.path.join(_snake_case , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
accelerator.wait_for_everyone()
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__a =eval_result
if best_iteration is None:
__a =new_iteration
__a =new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__a =new_iteration
__a =new_eval_result
__a =0
else:
if new_eval_result == best_eval_result:
__a =new_iteration
__a =new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__a =True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , _snake_case )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
| 308
| 1
|
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
return " ".join(
''.join(word[::-1] ) if len(_snake_case ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 308
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
_lowerCAmelCase : List[Any] = 256_047
_lowerCAmelCase : Dict = 256_145
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = NllbTokenizer
SCREAMING_SNAKE_CASE = NllbTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = {}
def __magic_name__ ( self ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
__a =tokenizer.tokenize('This is a test' )
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__a =tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a =tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
__a =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_seqaseq:
return
__a =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
__a =[
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
__a =[
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
__a =tokenizer.prepare_seqaseq_batch(
src_texts=__snake_case , tgt_texts=__snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
__a =tokenizer.prepare_seqaseq_batch(
__snake_case , tgt_texts=__snake_case , max_length=3 , return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
__a =tokenizer.prepare_seqaseq_batch(
src_texts=__snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('decoder_input_ids' , __snake_case )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =[AddedToken('<special>' , lstrip=__snake_case )]
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_r.encode('Hey this is a <special> token' )
__a =tokenizer_r.encode('<special>' , add_special_tokens=__snake_case )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
__a =self.tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_p.encode('Hey this is a <special> token' )
__a =tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = 'facebook/nllb-200-distilled-600M'
SCREAMING_SNAKE_CASE = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
SCREAMING_SNAKE_CASE = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def __magic_name__ ( cls ) -> Tuple:
'''simple docstring'''
__a =NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' )
__a =1
return cls
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 25_6057 )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
self.assertIn(__snake_case , self.tokenizer.all_special_ids )
# fmt: off
__a =[RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
__a =self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
__a =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertNotIn(self.tokenizer.eos_token , __snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , __snake_case )
__a =10
__a =self.tokenizer(__snake_case , max_length=__snake_case , truncation=__snake_case ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __snake_case )
self.assertEqual(len(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_6203, 3] )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =tempfile.mkdtemp()
__a =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__snake_case )
__a =NllbTokenizer.from_pretrained(__snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __snake_case )
@require_torch
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
__a =shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__a =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
self.assertEqual(__snake_case , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =self.tokenizer(self.src_text , padding=__snake_case , truncation=__snake_case , max_length=3 , return_tensors='pt' )
__a =self.tokenizer(
text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=10 , return_tensors='pt' )
__a =targets['input_ids']
__a =shift_tokens_right(
__snake_case , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(__snake_case ) , {
# A, test, EOS, en_XX
'input_ids': [[25_6047, 70, 7356, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_6057,
} , )
@require_torch
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =True
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
__a =False
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
| 308
| 1
|
def UpperCamelCase_( _snake_case : str , _snake_case : str ):
"""simple docstring"""
assert x is not None
assert y is not None
__a =len(_snake_case )
__a =len(_snake_case )
# declaring the array for storing the dp values
__a =[[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
__a =1 if x[i - 1] == y[j - 1] else 0
__a =max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
__a =''
__a , __a =m, n
while i > 0 and j > 0:
__a =1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
__a =x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = "AGGTAB"
_lowerCAmelCase : List[Any] = "GXTXAYB"
_lowerCAmelCase : Any = 4
_lowerCAmelCase : List[Any] = "GTAB"
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = longest_common_subsequence(a, b)
print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
| 308
|
def UpperCamelCase_( _snake_case : str , _snake_case : int ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(_snake_case ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 308
| 1
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = ['input_features']
def __init__( self , __snake_case=80 , __snake_case=1_6000 , __snake_case=160 , __snake_case=30 , __snake_case=400 , __snake_case=0.0 , __snake_case=False , **__snake_case , ) -> Any:
'''simple docstring'''
super().__init__(
feature_size=__snake_case , sampling_rate=__snake_case , padding_value=__snake_case , return_attention_mask=__snake_case , **__snake_case , )
__a =n_fft
__a =hop_length
__a =chunk_length
__a =chunk_length * sampling_rate
__a =self.n_samples // hop_length
__a =sampling_rate
__a =mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__snake_case , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__snake_case , norm='slaney' , mel_scale='slaney' , )
def __magic_name__ ( self , __snake_case ) -> np.ndarray:
'''simple docstring'''
__a =spectrogram(
__snake_case , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
__a =log_spec[:, :-1]
__a =np.maximum(__snake_case , log_spec.max() - 8.0 )
__a =(log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __magic_name__ ( __snake_case , __snake_case , __snake_case = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
__a =np.array(__snake_case , np.intaa )
__a =[]
for vector, length in zip(__snake_case , attention_mask.sum(-1 ) ):
__a =(vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
__a =padding_value
normed_input_values.append(__snake_case )
else:
__a =[(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , __snake_case , __snake_case = True , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = "max_length" , __snake_case = None , __snake_case = None , __snake_case = None , **__snake_case , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__a =isinstance(__snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__a =is_batched_numpy or (
isinstance(__snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a =[np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__snake_case , np.ndarray ):
__a =np.asarray(__snake_case , dtype=np.floataa )
elif isinstance(__snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a =raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a =[np.asarray([raw_speech] ).T]
__a =BatchFeature({'input_features': raw_speech} )
# convert into correct format for padding
__a =self.pad(
__snake_case , padding=__snake_case , max_length=max_length if max_length else self.n_samples , truncation=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
__a =self.zero_mean_unit_var_norm(
padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
__a =np.stack(padded_inputs['input_features'] , axis=0 )
# make sure list is in array format
__a =padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )
__a =[self._np_extract_fbank_features(__snake_case ) for waveform in input_features[0]]
if isinstance(input_features[0] , __snake_case ):
__a =[np.asarray(__snake_case , dtype=np.floataa ) for feature in input_features]
else:
__a =input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
__a =padded_inputs['attention_mask'][:, :: self.hop_length]
if return_tensors is not None:
__a =padded_inputs.convert_to_tensors(__snake_case )
return padded_inputs
def __magic_name__ ( self ) -> Dict[str, Any]:
'''simple docstring'''
__a =copy.deepcopy(self.__dict__ )
__a =self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 308
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class __magic_name__ ( pl.LightningModule ):
def __init__( self , __snake_case ) -> List[Any]:
'''simple docstring'''
super().__init__()
__a =model
__a =2
__a =nn.Linear(self.model.config.hidden_size , self.num_labels )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCamelCase_( _snake_case : str , _snake_case : str , _snake_case : str ):
"""simple docstring"""
__a =LongformerModel.from_pretrained(_snake_case )
__a =LightningModel(_snake_case )
__a =torch.load(_snake_case , map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
__a =LongformerForQuestionAnswering.from_pretrained(_snake_case )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_snake_case )
print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 308
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 308
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
SCREAMING_SNAKE_CASE = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
SCREAMING_SNAKE_CASE = field(
default=1_0_2_4 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = field(default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the test data.'} )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
__a =self.train_file.split('.' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__a =self.validation_file.split('.' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
SCREAMING_SNAKE_CASE = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def UpperCamelCase_( ):
"""simple docstring"""
__a =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__a , __a , __a =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__a , __a , __a =parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
__a =training_args.get_process_log_level()
logger.setLevel(_snake_case )
datasets.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__a =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__a =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__a =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__a ={'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__a =data_args.train_file.split('.' )[-1]
__a =data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__a =data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
__a =load_dataset('csv' , data_files=_snake_case , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__a =load_dataset('json' , data_files=_snake_case , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__a =raw_datasets['train'].features['label'].names
__a =len(_snake_case )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__a =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__a =TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_snake_case , )
__a =BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__a ='max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__a =False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__a ={'Refused': 0, 'Entailed': 1}
__a ={0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
__a =min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_snake_case : Tuple ):
# Tokenize the texts
def _convert_table_text_to_pandas(_snake_case : Optional[Any] ):
__a =[_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
__a =pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__a =examples['statement']
__a =list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
__a =tokenizer(_snake_case , _snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case )
__a =examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
__a =raw_datasets.map(
_snake_case , batched=_snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__a =raw_datasets['train']
if data_args.max_train_samples is not None:
__a =train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__a =raw_datasets['validation']
if data_args.max_eval_samples is not None:
__a =eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
__a =raw_datasets['test']
if data_args.max_predict_samples is not None:
__a =predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_snake_case ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_snake_case : EvalPrediction ):
__a =p.predictions[0] if isinstance(p.predictions , _snake_case ) else p.predictions
__a =np.argmax(_snake_case , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__a =default_data_collator
elif training_args.fpaa:
__a =DataCollatorWithPadding(_snake_case , pad_to_multiple_of=8 )
else:
__a =None
# Initialize our Trainer
__a =Trainer(
model=_snake_case , args=_snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_snake_case , tokenizer=_snake_case , data_collator=_snake_case , )
# Training
if training_args.do_train:
__a =None
if training_args.resume_from_checkpoint is not None:
__a =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a =last_checkpoint
__a =trainer.train(resume_from_checkpoint=_snake_case )
__a =train_result.metrics
__a =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(_snake_case )
)
__a =min(_snake_case , len(_snake_case ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , _snake_case )
trainer.save_metrics('train' , _snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__a =trainer.evaluate(eval_dataset=_snake_case )
__a =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_snake_case )
__a =min(_snake_case , len(_snake_case ) )
trainer.log_metrics('eval' , _snake_case )
trainer.save_metrics('eval' , _snake_case )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
__a =predict_dataset.remove_columns('label' )
__a =trainer.predict(_snake_case , metric_key_prefix='predict' ).predictions
__a =np.argmax(_snake_case , axis=1 )
__a =os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(_snake_case , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(_snake_case ):
__a =label_list[item]
writer.write(F'{index}\t{item}\n' )
__a ={'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**_snake_case )
else:
trainer.create_model_card(**_snake_case )
def UpperCamelCase_( _snake_case : Union[str, Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 308
| 1
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class __magic_name__ ( datasets.BeamBasedBuilder ):
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=__snake_case , )
def __magic_name__ ( self , __snake_case , __snake_case ) -> List[str]:
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )]
def __magic_name__ ( self , __snake_case , __snake_case ) -> Optional[Any]:
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__snake_case )
class __magic_name__ ( datasets.BeamBasedBuilder ):
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=__snake_case , )
def __magic_name__ ( self , __snake_case , __snake_case ) -> List[str]:
'''simple docstring'''
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} )
]
def __magic_name__ ( self , __snake_case , __snake_case ) -> Optional[int]:
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__snake_case )
def UpperCamelCase_( ):
"""simple docstring"""
return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]
def UpperCamelCase_( ):
"""simple docstring"""
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]
class __magic_name__ ( lowerCAmelCase_ ):
@require_beam
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__a =DummyBeamDataset(cache_dir=__snake_case , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__snake_case , builder.name , 'default' , '0.0.0' , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
__a =builder.as_dataset()
self.assertEqual(dset['train'].num_rows , __snake_case )
self.assertEqual(dset['train'].info.splits['train'].num_examples , __snake_case )
self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__snake_case , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
import apache_beam as beam
__a =beam.io.parquetio.WriteToParquet
__a =len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__a =DummyBeamDataset(cache_dir=__snake_case , beam_runner='DirectRunner' )
with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock:
__a =partial(__snake_case , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
__snake_case , builder.name , 'default' , '0.0.0' , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
__snake_case , builder.name , 'default' , '0.0.0' , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
__a =builder.as_dataset()
self.assertEqual(dset['train'].num_rows , __snake_case )
self.assertEqual(dset['train'].info.splits['train'].num_examples , __snake_case )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) )
self.assertTrue(
os.path.exists(os.path.join(__snake_case , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__a =DummyBeamDataset(cache_dir=__snake_case )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__a =NestedBeamDataset(cache_dir=__snake_case , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__snake_case , builder.name , 'default' , '0.0.0' , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) )
__a =builder.as_dataset()
self.assertEqual(dset['train'].num_rows , __snake_case )
self.assertEqual(dset['train'].info.splits['train'].num_examples , __snake_case )
self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__snake_case , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
| 308
|
from __future__ import annotations
import time
import numpy as np
_lowerCAmelCase : List[str] = [8, 5, 9, 7]
_lowerCAmelCase : List[str] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_lowerCAmelCase : List[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __magic_name__ :
def __init__( self , __snake_case , __snake_case , __snake_case , ) -> None:
'''simple docstring'''
__a =claim_vector
__a =allocated_resources_table
__a =maximum_claim_table
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __magic_name__ ( self ) -> list[list[int]]:
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__snake_case ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __magic_name__ ( self ) -> dict[int, list[int]]:
'''simple docstring'''
return {self.__need().index(__snake_case ): i for i in self.__need()}
def __magic_name__ ( self , **__snake_case ) -> None:
'''simple docstring'''
__a =self.__need()
__a =self.__allocated_resources_table
__a =self.__available_resources()
__a =self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
__a =False
for each_need in need_list:
__a =True
for index, need in enumerate(__snake_case ):
if need > available_resources[index]:
__a =False
break
if execution:
__a =True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__a =original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(__snake_case )
# update available/freed resources stack
__a =np.array(__snake_case ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(__snake_case ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(__snake_case ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(__snake_case ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(__snake_case ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(__snake_case ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
| 1
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
__a =text_generator('This is a test' , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
__a =text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
__snake_case , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
__a =text_generator('This is a test' , do_sample=__snake_case , num_return_sequences=2 , return_tensors=__snake_case )
self.assertEqual(
__snake_case , [
{'generated_token_ids': ANY(__snake_case )},
{'generated_token_ids': ANY(__snake_case )},
] , )
__a =text_generator.model.config.eos_token_id
__a ='<pad>'
__a =text_generator(
['This is a test', 'This is a second test'] , do_sample=__snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=__snake_case , )
self.assertEqual(
__snake_case , [
[
{'generated_token_ids': ANY(__snake_case )},
{'generated_token_ids': ANY(__snake_case )},
],
[
{'generated_token_ids': ANY(__snake_case )},
{'generated_token_ids': ANY(__snake_case )},
],
] , )
@require_tf
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
__a =text_generator('This is a test' , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
__a =text_generator(['This is a test', 'This is a second test'] , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> Any:
'''simple docstring'''
__a =TextGenerationPipeline(model=__snake_case , tokenizer=__snake_case )
return text_generator, ["This is a test", "Another test"]
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a ='Hello I believe in'
__a =pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
__a =text_generator(__snake_case )
self.assertEqual(
__snake_case , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
__a =text_generator(__snake_case , stop_sequence=' fe' )
self.assertEqual(__snake_case , [{'generated_text': 'Hello I believe in fe'}] )
def __magic_name__ ( self , __snake_case , __snake_case ) -> Tuple:
'''simple docstring'''
__a =text_generator.model
__a =text_generator.tokenizer
__a =text_generator('This is a test' )
self.assertEqual(__snake_case , [{'generated_text': ANY(__snake_case )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
__a =text_generator('This is a test' , return_full_text=__snake_case )
self.assertEqual(__snake_case , [{'generated_text': ANY(__snake_case )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
__a =pipeline(task='text-generation' , model=__snake_case , tokenizer=__snake_case , return_full_text=__snake_case )
__a =text_generator('This is a test' )
self.assertEqual(__snake_case , [{'generated_text': ANY(__snake_case )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
__a =text_generator('This is a test' , return_full_text=__snake_case )
self.assertEqual(__snake_case , [{'generated_text': ANY(__snake_case )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
__a =text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[{'generated_text': ANY(__snake_case )}, {'generated_text': ANY(__snake_case )}],
[{'generated_text': ANY(__snake_case )}, {'generated_text': ANY(__snake_case )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__a =text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[{'generated_text': ANY(__snake_case )}, {'generated_text': ANY(__snake_case )}],
[{'generated_text': ANY(__snake_case )}, {'generated_text': ANY(__snake_case )}],
] , )
with self.assertRaises(__snake_case ):
__a =text_generator('test' , return_full_text=__snake_case , return_text=__snake_case )
with self.assertRaises(__snake_case ):
__a =text_generator('test' , return_full_text=__snake_case , return_tensors=__snake_case )
with self.assertRaises(__snake_case ):
__a =text_generator('test' , return_text=__snake_case , return_tensors=__snake_case )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__a =text_generator('' )
self.assertEqual(__snake_case , [{'generated_text': ANY(__snake_case )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__a =text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
__a =['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 500 , max_new_tokens=20 )
__a =text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__snake_case ):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
import torch
# Classic `model_kwargs`
__a =pipeline(
model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__a =pipe('This is a test' )
self.assertEqual(
__snake_case , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
__a =pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__a =pipe('This is a test' )
self.assertEqual(
__snake_case , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
__a =pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
__a =pipe('This is a test' )
self.assertEqual(
__snake_case , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
import torch
__a =pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__ ( self ) -> int:
'''simple docstring'''
import torch
__a =pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa )
pipe('This is a test' , do_sample=__snake_case , top_p=0.5 )
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a ='Hello world'
__a =pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
__a =logging.get_logger('transformers.generation.tf_utils' )
else:
__a =logging.get_logger('transformers.generation.utils' )
__a ='Both `max_new_tokens`' # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__snake_case ) as cl:
__a =text_generator(__snake_case , max_length=10 , max_new_tokens=1 )
self.assertIn(__snake_case , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__snake_case ) as cl:
__a =text_generator(__snake_case , max_new_tokens=1 )
self.assertNotIn(__snake_case , cl.out )
with CaptureLogger(__snake_case ) as cl:
__a =text_generator(__snake_case , max_length=10 )
self.assertNotIn(__snake_case , cl.out )
| 308
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_lowerCAmelCase : Tuple = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
_lowerCAmelCase : Optional[int] = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
_lowerCAmelCase : Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a ={letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def UpperCamelCase_( _snake_case : tuple ):
"""simple docstring"""
return x[0]
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_letter_count(_snake_case )
__a ={
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(_snake_case )
__a ={}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_snake_case )
__a =''.join(freq_to_letter[freq] )
__a =list(freq_to_letter_str.items() )
freq_pairs.sort(key=_snake_case , reverse=_snake_case )
__a =[freq_pair[1] for freq_pair in freq_pairs]
return "".join(_snake_case )
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_frequency_order(_snake_case )
__a =0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def UpperCamelCase_( _snake_case : Optional[int] , _snake_case : Any , _snake_case : Optional[int] , _snake_case : str , _snake_case : List[Any] ):
"""simple docstring"""
for attribute in key.split('.' ):
__a =getattr(_snake_case , _snake_case )
if weight_type is not None:
__a =getattr(_snake_case , _snake_case ).shape
else:
__a =hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
__a =value
elif weight_type == "weight_g":
__a =value
elif weight_type == "weight_v":
__a =value
elif weight_type == "bias":
__a =value
else:
__a =value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def UpperCamelCase_( _snake_case : Tuple , _snake_case : str , _snake_case : int ):
"""simple docstring"""
__a =[]
__a =fairseq_model.state_dict()
__a =hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__a =False
if "conv_layers" in name:
load_conv_layer(
_snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == 'group' , )
__a =True
else:
for key, mapped_key in MAPPING.items():
__a ='sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__a =True
if "*" in mapped_key:
__a =name.split(_snake_case )[0].split('.' )[-2]
__a =mapped_key.replace('*' , _snake_case )
if "weight_g" in name:
__a ='weight_g'
elif "weight_v" in name:
__a ='weight_v'
elif "weight" in name:
__a ='weight'
elif "bias" in name:
__a ='bias'
else:
__a =None
set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
continue
if not is_used:
unused_weights.append(_snake_case )
logger.warning(F'Unused weights: {unused_weights}' )
def UpperCamelCase_( _snake_case : List[str] , _snake_case : Any , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Dict ):
"""simple docstring"""
__a =full_name.split('conv_layers.' )[-1]
__a =name.split('.' )
__a =int(items[0] )
__a =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
__a =value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
__a =value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
__a =value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
__a =value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_snake_case )
def UpperCamelCase_( _snake_case : Union[str, Any] , _snake_case : Any ):
"""simple docstring"""
__a =SEWConfig()
if is_finetuned:
__a =model.wav_encoder.wav_model.cfg
else:
__a =model.cfg
__a =fs_config.conv_bias
__a =eval(fs_config.conv_feature_layers )
__a =[x[0] for x in conv_layers]
__a =[x[1] for x in conv_layers]
__a =[x[2] for x in conv_layers]
__a ='gelu'
__a ='layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
__a =0.0
__a =fs_config.activation_fn.name
__a =fs_config.encoder_embed_dim
__a =0.02
__a =fs_config.encoder_ffn_embed_dim
__a =1e-5
__a =fs_config.encoder_layerdrop
__a =fs_config.encoder_attention_heads
__a =fs_config.conv_pos_groups
__a =fs_config.conv_pos
__a =len(_snake_case )
__a =fs_config.encoder_layers
__a =fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
__a =model.cfg
__a =fs_config.final_dropout
__a =fs_config.layerdrop
__a =fs_config.activation_dropout
__a =fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
__a =fs_config.attention_dropout
__a =fs_config.dropout_input
__a =fs_config.dropout
__a =fs_config.mask_channel_length
__a =fs_config.mask_channel_prob
__a =fs_config.mask_length
__a =fs_config.mask_prob
__a ='Wav2Vec2FeatureExtractor'
__a ='Wav2Vec2CTCTokenizer'
return config
@torch.no_grad()
def UpperCamelCase_( _snake_case : str , _snake_case : Optional[int] , _snake_case : List[Any]=None , _snake_case : Optional[int]=None , _snake_case : Dict=True ):
"""simple docstring"""
if is_finetuned:
__a , __a , __a =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__a , __a , __a =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
__a =SEWConfig.from_pretrained(_snake_case )
else:
__a =convert_config(model[0] , _snake_case )
__a =model[0].eval()
__a =True if config.feat_extract_norm == 'layer' else False
__a =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
if is_finetuned:
if dict_path:
__a =Dictionary.load(_snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a =target_dict.pad_index
__a =target_dict.bos_index
__a =target_dict.pad_index
__a =target_dict.bos_index
__a =target_dict.eos_index
__a =len(target_dict.symbols )
__a =os.path.join(_snake_case , 'vocab.json' )
if not os.path.isdir(_snake_case ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_snake_case ) )
return
os.makedirs(_snake_case , exist_ok=_snake_case )
with open(_snake_case , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , _snake_case )
__a =WavaVecaCTCTokenizer(
_snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_snake_case , )
__a =WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case )
processor.save_pretrained(_snake_case )
__a =SEWForCTC(_snake_case )
else:
__a =SEWModel(_snake_case )
feature_extractor.save_pretrained(_snake_case )
recursively_load_weights(_snake_case , _snake_case , _snake_case )
hf_model.save_pretrained(_snake_case )
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_lowerCAmelCase : str = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 308
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : int = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'swin2sr'
SCREAMING_SNAKE_CASE = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __snake_case=64 , __snake_case=1 , __snake_case=3 , __snake_case=180 , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=8 , __snake_case=2.0 , __snake_case=True , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case="gelu" , __snake_case=False , __snake_case=0.02 , __snake_case=1e-5 , __snake_case=2 , __snake_case=1.0 , __snake_case="1conv" , __snake_case="pixelshuffle" , **__snake_case , ) -> Dict:
'''simple docstring'''
super().__init__(**__snake_case )
__a =image_size
__a =patch_size
__a =num_channels
__a =embed_dim
__a =depths
__a =len(__snake_case )
__a =num_heads
__a =window_size
__a =mlp_ratio
__a =qkv_bias
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =drop_path_rate
__a =hidden_act
__a =use_absolute_embeddings
__a =layer_norm_eps
__a =initializer_range
__a =upscale
__a =img_range
__a =resi_connection
__a =upsampler
| 308
| 1
|
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , *__snake_case , **__snake_case ) -> None:
'''simple docstring'''
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 308
|
import os
def UpperCamelCase_( _snake_case : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_snake_case ) , _snake_case ) ) as input_file:
__a =[
[int(_snake_case ) for element in line.split(',' )]
for line in input_file.readlines()
]
__a =len(_snake_case )
__a =len(matrix[0] )
__a =[[-1 for _ in range(_snake_case )] for _ in range(_snake_case )]
for i in range(_snake_case ):
__a =matrix[i][0]
for j in range(1 , _snake_case ):
for i in range(_snake_case ):
__a =minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _snake_case ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
| 1
|
import heapq
def UpperCamelCase_( _snake_case : dict ):
"""simple docstring"""
__a =[]
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(_snake_case , [-1 * len(_snake_case ), (key, value)] )
# chosen_vertices = set of chosen vertices
__a =set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
__a =heapq.heappop(_snake_case )[1][0]
chosen_vertices.add(_snake_case )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
__a =elem[1][1].index(_snake_case )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_snake_case )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : int = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 308
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_lowerCAmelCase : Any = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , *__snake_case , **__snake_case ) -> None:
'''simple docstring'''
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 308
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : str = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'unispeech-sat'
def __init__( self , __snake_case=32 , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=0.02 , __snake_case=1e-5 , __snake_case="group" , __snake_case="gelu" , __snake_case=(512, 512, 512, 512, 512, 512, 512) , __snake_case=(5, 2, 2, 2, 2, 2, 2) , __snake_case=(10, 3, 3, 3, 3, 2, 2) , __snake_case=False , __snake_case=128 , __snake_case=16 , __snake_case=False , __snake_case=True , __snake_case=0.05 , __snake_case=10 , __snake_case=2 , __snake_case=0.0 , __snake_case=10 , __snake_case=0 , __snake_case=320 , __snake_case=2 , __snake_case=0.1 , __snake_case=100 , __snake_case=256 , __snake_case=256 , __snake_case=0.1 , __snake_case="mean" , __snake_case=False , __snake_case=False , __snake_case=256 , __snake_case=(512, 512, 512, 512, 1500) , __snake_case=(5, 3, 3, 1, 1) , __snake_case=(1, 2, 3, 1, 1) , __snake_case=512 , __snake_case=0 , __snake_case=1 , __snake_case=2 , __snake_case=504 , **__snake_case , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case )
__a =hidden_size
__a =feat_extract_norm
__a =feat_extract_activation
__a =list(__snake_case )
__a =list(__snake_case )
__a =list(__snake_case )
__a =conv_bias
__a =num_conv_pos_embeddings
__a =num_conv_pos_embedding_groups
__a =len(self.conv_dim )
__a =num_hidden_layers
__a =intermediate_size
__a =hidden_act
__a =num_attention_heads
__a =hidden_dropout
__a =attention_dropout
__a =activation_dropout
__a =feat_proj_dropout
__a =final_dropout
__a =layerdrop
__a =layer_norm_eps
__a =initializer_range
__a =vocab_size
__a =num_clusters
__a =do_stable_layer_norm
__a =use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a =apply_spec_augment
__a =mask_time_prob
__a =mask_time_length
__a =mask_time_min_masks
__a =mask_feature_prob
__a =mask_feature_length
__a =mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__a =num_codevectors_per_group
__a =num_codevector_groups
__a =contrastive_logits_temperature
__a =feat_quantizer_dropout
__a =num_negatives
__a =codevector_dim
__a =proj_codevector_dim
__a =diversity_loss_weight
# ctc loss
__a =ctc_loss_reduction
__a =ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__a =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__a =list(__snake_case )
__a =list(__snake_case )
__a =list(__snake_case )
__a =xvector_output_dim
@property
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 308
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = KandinskyVaaInpaintPipeline
SCREAMING_SNAKE_CASE = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
SCREAMING_SNAKE_CASE = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
SCREAMING_SNAKE_CASE = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
SCREAMING_SNAKE_CASE = False
@property
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
return 32
@property
def __magic_name__ ( self ) -> int:
'''simple docstring'''
return 32
@property
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def __magic_name__ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
return 100
@property
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__a ={
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__a =UNetaDConditionModel(**__snake_case )
return model
@property
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__a =VQModel(**self.dummy_movq_kwargs )
return model
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.dummy_unet
__a =self.dummy_movq
__a =DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type='epsilon' , thresholding=__snake_case , )
__a ={
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __magic_name__ ( self , __snake_case , __snake_case=0 ) -> str:
'''simple docstring'''
__a =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case )
__a =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__snake_case )
# create init_image
__a =floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
__a =image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a =Image.fromarray(np.uinta(__snake_case ) ).convert('RGB' ).resize((256, 256) )
# create mask
__a =np.ones((64, 64) , dtype=np.floataa )
__a =0
if str(__snake_case ).startswith('mps' ):
__a =torch.manual_seed(__snake_case )
else:
__a =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__a ={
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a ='cpu'
__a =self.get_dummy_components()
__a =self.pipeline_class(**__snake_case )
__a =pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__a =pipe(**self.get_dummy_inputs(__snake_case ) )
__a =output.images
__a =pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
__a =image[0, -3:, -3:, -1]
__a =image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
__a =np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def __magic_name__ ( self ) -> int:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> str:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
__a =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__a =np.ones((768, 768) , dtype=np.floataa )
__a =0
__a ='a hat'
__a =KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
__a =KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
__a =pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
__a =torch.Generator(device='cpu' ).manual_seed(0 )
__a , __a =pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__a =pipeline(
image=__snake_case , mask_image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
__a =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 308
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'yolos'
def __init__( self , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.02 , __snake_case=1e-12 , __snake_case=[512, 864] , __snake_case=16 , __snake_case=3 , __snake_case=True , __snake_case=100 , __snake_case=True , __snake_case=False , __snake_case=1 , __snake_case=5 , __snake_case=2 , __snake_case=5 , __snake_case=2 , __snake_case=0.1 , **__snake_case , ) -> str:
'''simple docstring'''
super().__init__(**__snake_case )
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =intermediate_size
__a =hidden_act
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =initializer_range
__a =layer_norm_eps
__a =image_size
__a =patch_size
__a =num_channels
__a =qkv_bias
__a =num_detection_tokens
__a =use_mid_position_embeddings
__a =auxiliary_loss
# Hungarian matcher
__a =class_cost
__a =bbox_cost
__a =giou_cost
# Loss coefficients
__a =bbox_loss_coefficient
__a =giou_loss_coefficient
__a =eos_coefficient
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = version.parse('1.11' )
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __magic_name__ ( self ) -> float:
'''simple docstring'''
return 1e-4
@property
def __magic_name__ ( self ) -> int:
'''simple docstring'''
return 12
| 308
| 1
|
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Union[str, Any] = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = DebertaVaTokenizer
SCREAMING_SNAKE_CASE = DebertaVaTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a =DebertaVaTokenizer(__snake_case , unk_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self , __snake_case ) -> Dict:
'''simple docstring'''
__a ='this is a test'
__a ='this is a test'
return input_text, output_text
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a ='<pad>'
__a =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '[PAD]' )
self.assertEqual(len(__snake_case ) , 3_0001 )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
# fmt: off
__a =' \tHeLLo!how \n Are yoU? '
__a =['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
__a =DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case )
__a =tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) )
self.assertListEqual(__snake_case , __snake_case )
__a =DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case )
__a =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) )
self.assertListEqual(__snake_case , __snake_case )
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> str:
'''simple docstring'''
# fmt: off
__a ='I was born in 92000, and this is falsé.'
__a =['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
__a =DebertaVaTokenizer(__snake_case , split_by_punct=__snake_case )
__a =tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) )
self.assertListEqual(__snake_case , __snake_case )
__a =DebertaVaTokenizerFast(__snake_case , split_by_punct=__snake_case )
__a =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) )
self.assertListEqual(__snake_case , __snake_case )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# fmt: off
__a ='I was born in 92000, and this is falsé.'
__a =['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
__a =DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case )
__a =tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) )
self.assertListEqual(__snake_case , __snake_case )
__a =DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case )
__a =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) )
self.assertListEqual(__snake_case , __snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
# fmt: off
__a ='I was born in 92000, and this is falsé.'
__a =['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
__a =DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case )
__a =tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) )
self.assertListEqual(__snake_case , __snake_case )
__a =DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case )
__a =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) )
self.assertListEqual(__snake_case , __snake_case )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
# fmt: off
__a ='I was born in 92000, and this is falsé.'
__a =['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
__a =DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case )
__a =tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) )
self.assertListEqual(__snake_case , __snake_case )
__a =DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case )
__a =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) )
self.assertListEqual(__snake_case , __snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
# fmt: off
__a =' \tHeLLo!how \n Are yoU? '
__a =['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
__a =DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case )
__a =tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) )
self.assertListEqual(__snake_case , __snake_case )
__a =DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case )
__a =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) )
self.assertListEqual(__snake_case , __snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.get_tokenizer()
__a =self.get_rust_tokenizer()
__a ='I was born in 92000, and this is falsé.'
__a =tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) )
__a =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) )
self.assertListEqual(__snake_case , __snake_case )
__a =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
__a =rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =self.get_rust_tokenizer()
__a =tokenizer.encode(__snake_case )
__a =rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a ='This is a test'
__a =[13, 1, 4398, 25, 21, 1289]
__a =['▁', 'T', 'his', '▁is', '▁a', '▁test']
__a =['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
__a =DebertaVaTokenizer(__snake_case , keep_accents=__snake_case )
__a =DebertaVaTokenizerFast(__snake_case , keep_accents=__snake_case )
__a =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =rust_tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# fmt: off
__a ='I was born in 92000, and this is falsé.'
__a =[13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
__a =['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
__a =['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
__a =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =rust_tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =DebertaVaTokenizer(__snake_case )
__a =tokenizer.encode('sequence builders' )
__a =tokenizer.encode('multi-sequence build' )
__a =tokenizer.build_inputs_with_special_tokens(__snake_case )
__a =tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __snake_case )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __snake_case , )
@slow
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
# fmt: off
__a ={'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
| 308
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def UpperCamelCase_( _snake_case : Optional[Any] ):
"""simple docstring"""
__a =model.config
__a =DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
__a =MBartConfig(
is_decoder=_snake_case , is_encoder_decoder=_snake_case , add_cross_attention=_snake_case , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=_snake_case , add_final_layer_norm=_snake_case , )
return encoder_config, decoder_config
def UpperCamelCase_( _snake_case : Tuple ):
"""simple docstring"""
if "encoder.model" in name:
__a =name.replace('encoder.model' , 'encoder' )
if "decoder.model" in name:
__a =name.replace('decoder.model' , 'decoder' )
if "patch_embed.proj" in name:
__a =name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__a =name.replace('patch_embed.norm' , 'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
__a ='encoder.' + name
if "attn.proj" in name:
__a =name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "mask" not in name:
__a =name.replace('attn' , 'attention.self' )
if "norm1" in name:
__a =name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__a =name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__a =name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__a =name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
__a ='encoder.layernorm.weight'
if name == "encoder.norm.bias":
__a ='encoder.layernorm.bias'
return name
def UpperCamelCase_( _snake_case : Tuple , _snake_case : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__a =orig_state_dict.pop(_snake_case )
if "qkv" in key:
__a =key.split('.' )
__a =int(key_split[3] )
__a =int(key_split[5] )
__a =model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__a =val[:dim, :]
__a =val[dim : dim * 2, :]
__a =val[-dim:, :]
else:
__a =val[:dim]
__a =val[dim : dim * 2]
__a =val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
__a =val
return orig_state_dict
def UpperCamelCase_( _snake_case : Tuple , _snake_case : Union[str, Any]=None , _snake_case : List[Any]=False ):
"""simple docstring"""
__a =DonutModel.from_pretrained(_snake_case ).eval()
# load HuggingFace model
__a , __a =get_configs(_snake_case )
__a =DonutSwinModel(_snake_case )
__a =MBartForCausalLM(_snake_case )
__a =VisionEncoderDecoderModel(encoder=_snake_case , decoder=_snake_case )
model.eval()
__a =original_model.state_dict()
__a =convert_state_dict(_snake_case , _snake_case )
model.load_state_dict(_snake_case )
# verify results on scanned document
__a =load_dataset('hf-internal-testing/example-documents' )
__a =dataset['test'][0]['image'].convert('RGB' )
__a =XLMRobertaTokenizerFast.from_pretrained(_snake_case , from_slow=_snake_case )
__a =DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
__a =DonutProcessor(_snake_case , _snake_case )
__a =processor(_snake_case , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
__a ='<s_docvqa><s_question>{user_input}</s_question><s_answer>'
__a ='When is the coffee break?'
__a =task_prompt.replace('{user_input}' , _snake_case )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
__a ='<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
__a ='<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
__a ='s_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
__a ='<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
__a ='hello world'
else:
raise ValueError('Model name not supported' )
__a =original_model.decoder.tokenizer(_snake_case , add_special_tokens=_snake_case , return_tensors='pt' )[
'input_ids'
]
__a =original_model.encoder.model.patch_embed(_snake_case )
__a , __a =model.encoder.embeddings(_snake_case )
assert torch.allclose(_snake_case , _snake_case , atol=1e-3 )
# verify encoder hidden states
__a =original_model.encoder(_snake_case )
__a =model.encoder(_snake_case ).last_hidden_state
assert torch.allclose(_snake_case , _snake_case , atol=1e-2 )
# verify decoder hidden states
__a =original_model(_snake_case , _snake_case , _snake_case ).logits
__a =model(_snake_case , decoder_input_ids=_snake_case ).logits
assert torch.allclose(_snake_case , _snake_case , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(_snake_case )
processor.save_pretrained(_snake_case )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 308
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __magic_name__ :
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=16 , __snake_case=2 , __snake_case=0.02 , __snake_case=3 , __snake_case=4 , __snake_case=None , ) -> Dict:
'''simple docstring'''
__a =parent
__a =batch_size
__a =seq_length
__a =is_training
__a =use_token_type_ids
__a =use_labels
__a =vocab_size
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =intermediate_size
__a =hidden_act
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =max_position_embeddings
__a =type_vocab_size
__a =type_sequence_label_size
__a =initializer_range
__a =num_labels
__a =num_choices
__a =scope
__a =self.vocab_size - 1
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a =None
if self.use_token_type_ids:
__a =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a =None
__a =None
__a =None
if self.use_labels:
__a =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a =ids_tensor([self.batch_size] , self.num_choices )
__a =OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__a =ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , *__snake_case ) -> Dict:
'''simple docstring'''
__a =OpenAIGPTModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , token_type_ids=__snake_case , head_mask=__snake_case )
__a =model(__snake_case , token_type_ids=__snake_case )
__a =model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , *__snake_case ) -> List[Any]:
'''simple docstring'''
__a =OpenAIGPTLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , *__snake_case ) -> Optional[int]:
'''simple docstring'''
__a =OpenAIGPTDoubleHeadsModel(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , *__snake_case ) -> Optional[Any]:
'''simple docstring'''
__a =self.num_labels
__a =OpenAIGPTForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a =model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) =config_and_inputs
__a ={
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> int:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case=False ) -> Dict:
'''simple docstring'''
__a =super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__a =torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case , )
__a =inputs_dict['labels']
__a =inputs_dict['labels']
__a =torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__snake_case , )
__a =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =OpenAIGPTModelTester(self )
__a =ConfigTester(self , config_class=__snake_case , n_embd=37 )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__snake_case )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__snake_case )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__snake_case )
@slow
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a =OpenAIGPTModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class __magic_name__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(__snake_case )
__a =torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__snake_case ) # the president is
__a =[
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__a =model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() , __snake_case )
| 308
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
SCREAMING_SNAKE_CASE = False
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
__a =['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
__a =dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__a =['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
__a ={'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__snake_case ) )
def __magic_name__ ( self , **__snake_case ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def __magic_name__ ( self , __snake_case ) -> List[Any]:
'''simple docstring'''
__a ='adapt act apte'
__a ='adapt act apte'
return input_text, output_text
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a ='adapt act apte'
__a =['adapt', 'act', 'ap@@', 'te']
__a =tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =[tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__a =[0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
__a ='I am a small frog.'
__a =tok([src_text] , padding=__snake_case , truncation=__snake_case )['input_ids']
__a =tok.batch_decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
__a ='I am a small frog .'
__a ='.'
__a =tok(__snake_case )['input_ids']
__a =tok(__snake_case )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 308
| 1
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = 42 # [batch_size x 3]
SCREAMING_SNAKE_CASE = 42 # [batch_size x 3]
SCREAMING_SNAKE_CASE = 42 # [batch_size x 3]
SCREAMING_SNAKE_CASE = 42 # [batch_size x 3]
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __magic_name__ ( self ) -> str:
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def __magic_name__ ( self ) -> torch.Tensor:
'''simple docstring'''
__a =torch.arange(self.height * self.width )
__a =torch.stack(
[
pixel_indices % self.width,
torch.div(__snake_case , self.width , rounding_mode='trunc' ),
] , axis=1 , )
return coords
@property
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a , *__a =self.shape
__a =int(np.prod(__snake_case ) )
__a =self.get_image_coords()
__a =torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__a =self.get_camera_rays(__snake_case )
__a =rays.view(__snake_case , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __magic_name__ ( self , __snake_case ) -> torch.Tensor:
'''simple docstring'''
__a , *__a , __a =coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__a =coords.view(__snake_case , -1 , 2 )
__a =self.resolution()
__a =self.fov()
__a =(flat.float() / (res - 1)) * 2 - 1
__a =fracs * torch.tan(fov / 2 )
__a =fracs.view(__snake_case , -1 , 2 )
__a =(
self.z.view(__snake_case , 1 , 3 )
+ self.x.view(__snake_case , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__snake_case , 1 , 3 ) * fracs[:, :, 1:]
)
__a =directions / directions.norm(dim=-1 , keepdim=__snake_case )
__a =torch.stack(
[
torch.broadcast_to(self.origin.view(__snake_case , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__snake_case , *__snake_case , 2 , 3 )
def __magic_name__ ( self , __snake_case , __snake_case ) -> "DifferentiableProjectiveCamera":
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__snake_case , height=__snake_case , x_fov=self.x_fov , y_fov=self.y_fov , )
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
__a =[]
__a =[]
__a =[]
__a =[]
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
__a =np.array([np.sin(_snake_case ), np.cos(_snake_case ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__a =-z * 4
__a =np.array([np.cos(_snake_case ), -np.sin(_snake_case ), 0.0] )
__a =np.cross(_snake_case , _snake_case )
origins.append(_snake_case )
xs.append(_snake_case )
ys.append(_snake_case )
zs.append(_snake_case )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(_snake_case , axis=0 ) ).float() , x=torch.from_numpy(np.stack(_snake_case , axis=0 ) ).float() , y=torch.from_numpy(np.stack(_snake_case , axis=0 ) ).float() , z=torch.from_numpy(np.stack(_snake_case , axis=0 ) ).float() , width=_snake_case , height=_snake_case , x_fov=0.7 , y_fov=0.7 , shape=(1, len(_snake_case )) , )
| 308
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __magic_name__ ( unittest.TestCase , lowerCAmelCase_ ):
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =load_tool('text-to-speech' )
self.tool.setup()
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
__a =self.tool('hey' )
__a =result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
__a =self.tool('hey' )
__a =result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
| 308
| 1
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
SCREAMING_SNAKE_CASE = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE = frozenset([] )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
__a =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__snake_case , )
__a =DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
__a =DDIMInverseScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__snake_case , set_alpha_to_zero=__snake_case , )
torch.manual_seed(0 )
__a =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__a =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
__a =CLIPTextModel(__snake_case )
__a =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__a ={
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __magic_name__ ( self , __snake_case , __snake_case=0 ) -> Optional[Any]:
'''simple docstring'''
__a =floats_tensor((1, 16, 16) , rng=random.Random(__snake_case ) ).to(__snake_case )
__a =floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith('mps' ):
__a =torch.manual_seed(__snake_case )
else:
__a =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__a ={
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __magic_name__ ( self , __snake_case , __snake_case=0 ) -> Any:
'''simple docstring'''
__a =floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
__a =image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a =Image.fromarray(np.uinta(__snake_case ) ).convert('RGB' )
if str(__snake_case ).startswith('mps' ):
__a =torch.manual_seed(__snake_case )
else:
__a =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__a ={
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __magic_name__ ( self , __snake_case , __snake_case=0 ) -> str:
'''simple docstring'''
__a =floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
__a =image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a =Image.fromarray(np.uinta(__snake_case ) ).convert('RGB' )
if str(__snake_case ).startswith('mps' ):
__a =torch.manual_seed(__snake_case )
else:
__a =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__a ={
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
if not hasattr(self.pipeline_class , '_optional_components' ):
return
__a =self.get_dummy_components()
__a =self.pipeline_class(**__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__snake_case , __snake_case , __snake_case )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
__a =self.get_dummy_inputs(__snake_case )
__a =pipe(**__snake_case )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__snake_case )
__a =self.pipeline_class.from_pretrained(__snake_case )
pipe_loaded.to(__snake_case )
pipe_loaded.set_progress_bar_config(disable=__snake_case )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__snake_case , __snake_case ) is None , f'`{optional_component}` did not stay set to None after loading.' , )
__a =self.get_dummy_inputs(__snake_case )
__a =pipe_loaded(**__snake_case )[0]
__a =np.abs(output - output_loaded ).max()
self.assertLess(__snake_case , 1e-4 )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a ='cpu'
__a =self.get_dummy_components()
__a =self.pipeline_class(**__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_dummy_mask_inputs(__snake_case )
__a =pipe.generate_mask(**__snake_case )
__a =mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
__a =np.array([0] * 9 )
__a =np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__snake_case , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a ='cpu'
__a =self.get_dummy_components()
__a =self.pipeline_class(**__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_dummy_inversion_inputs(__snake_case )
__a =pipe.invert(**__snake_case ).images
__a =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
__a =np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
__a =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__snake_case , 1e-3 )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a ='cpu'
__a =self.get_dummy_components()
__a ={'beta_start': 0.0_0085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
__a =DPMSolverMultistepScheduler(**__snake_case )
__a =DPMSolverMultistepInverseScheduler(**__snake_case )
__a =self.pipeline_class(**__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_dummy_inversion_inputs(__snake_case )
__a =pipe.invert(**__snake_case ).images
__a =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
__a =np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
__a =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__snake_case , 1e-3 )
@require_torch_gpu
@slow
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __magic_name__ ( cls ) -> Tuple:
'''simple docstring'''
__a =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
__a =raw_image.convert('RGB' ).resize((768, 768) )
__a =raw_image
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =torch.manual_seed(0 )
__a =StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=__snake_case , torch_dtype=torch.floataa )
__a =DDIMScheduler.from_config(pipe.scheduler.config )
__a =DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__snake_case )
__a ='a bowl of fruit'
__a ='a bowl of pears'
__a =pipe.generate_mask(
image=self.raw_image , source_prompt=__snake_case , target_prompt=__snake_case , generator=__snake_case , )
__a =pipe.invert(
prompt=__snake_case , image=self.raw_image , inpaint_strength=0.7 , generator=__snake_case ).latents
__a =pipe(
prompt=__snake_case , mask_image=__snake_case , image_latents=__snake_case , generator=__snake_case , negative_prompt=__snake_case , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
__a =(
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =torch.manual_seed(0 )
__a =StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=__snake_case , torch_dtype=torch.floataa )
__a =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__a =DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__snake_case )
__a ='a bowl of fruit'
__a ='a bowl of pears'
__a =pipe.generate_mask(
image=self.raw_image , source_prompt=__snake_case , target_prompt=__snake_case , generator=__snake_case , )
__a =pipe.invert(
prompt=__snake_case , image=self.raw_image , inpaint_strength=0.7 , generator=__snake_case , num_inference_steps=25 , ).latents
__a =pipe(
prompt=__snake_case , mask_image=__snake_case , image_latents=__snake_case , generator=__snake_case , negative_prompt=__snake_case , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
__a =(
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 308
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> Optional[Any]:
'''simple docstring'''
__a =()
for resnet, attn in zip(self.resnets , self.attentions ):
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case=True ) -> Optional[int]:
'''simple docstring'''
__a =()
for resnet in self.resnets:
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# there is always at least one resnet
__a =[
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__a =[]
for _ in range(self.num_layers ):
__a =FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
__a =attentions
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[str]:
'''simple docstring'''
__a =self.resnets[0](__snake_case , __snake_case )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
return hidden_states
| 308
| 1
|
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = CanineTokenizer
SCREAMING_SNAKE_CASE = False
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
__a =CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
return CanineTokenizer.from_pretrained('google/canine-s' )
def __magic_name__ ( self , **__snake_case ) -> CanineTokenizer:
'''simple docstring'''
__a =self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
__a =1024
return tokenizer
@require_torch
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.canine_tokenizer
__a =['Life is like a box of chocolates.', 'You never know what you\'re gonna get.']
# fmt: off
__a =[5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
__a =tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' )
self.assertIsInstance(__snake_case , __snake_case )
__a =list(batch.input_ids.numpy()[0] )
self.assertListEqual(__snake_case , __snake_case )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.canine_tokenizer
__a =['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.']
__a =tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('input_ids' , __snake_case )
self.assertIn('attention_mask' , __snake_case )
self.assertIn('token_type_ids' , __snake_case )
@require_torch
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.canine_tokenizer
__a =[
'What\'s the weater?',
'It\'s about 25 degrees.',
]
__a =tokenizer(
text_target=__snake_case , max_length=32 , padding='max_length' , truncation=__snake_case , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
# safety check on max_len default value so we are sure the test works
__a =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__a =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__a =tempfile.mkdtemp()
__a =' He is very happy, UNwant\u00E9d,running'
__a =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
tokenizer.save_pretrained(__snake_case )
__a =tokenizer.__class__.from_pretrained(__snake_case )
__a =after_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
shutil.rmtree(__snake_case )
__a =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__a =tempfile.mkdtemp()
__a =' He is very happy, UNwant\u00E9d,running'
__a =tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__a =chr(0Xe007 )
additional_special_tokens.append(__snake_case )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__a =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
tokenizer.save_pretrained(__snake_case )
__a =tokenizer.__class__.from_pretrained(__snake_case )
__a =after_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
self.assertIn(__snake_case , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__a =tokenizer.__class__.from_pretrained(__snake_case , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__snake_case )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =self.get_tokenizers(do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__a , __a =self.get_clean_sequence(__snake_case )
# a special token for Canine can be defined as follows:
__a =0Xe005
__a =chr(__snake_case )
tokenizer.add_special_tokens({'cls_token': special_token} )
__a =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertEqual(len(__snake_case ) , 1 )
__a =tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__snake_case )
__a =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
__a =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
__a =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertEqual(__snake_case , input_encoded + special_token_id )
__a =tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
self.assertTrue(special_token not in decoded )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.get_tokenizers(do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__a =chr(0Xe005 )
__a =chr(0Xe006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__snake_case )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} )
__a =tokenizer.tokenize(__snake_case )
__a =tokenizer.tokenize(__snake_case )
self.assertEqual(len(__snake_case ) , 1 )
self.assertEqual(len(__snake_case ) , 1 )
self.assertEqual(token_a[0] , __snake_case )
self.assertEqual(token_a[0] , __snake_case )
@require_tokenizers
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.get_tokenizers(do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
__a =0Xe006
__a =chr(__snake_case )
__a =AddedToken(__snake_case , lstrip=__snake_case )
tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__snake_case )
tokenizer.from_pretrained(__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__snake_case )
with open(os.path.join(__snake_case , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__a =json.load(__snake_case )
with open(os.path.join(__snake_case , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__a =json.load(__snake_case )
# a special token for Canine can be defined as follows:
__a =0Xe006
__a =chr(__snake_case )
__a =[new_token_a]
__a =[new_token_a]
with open(os.path.join(__snake_case , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(__snake_case , __snake_case )
with open(os.path.join(__snake_case , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(__snake_case , __snake_case )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__a =tokenizer_class.from_pretrained(__snake_case , extra_ids=0 )
self.assertIn(__snake_case , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__a =0Xe007
__a =chr(__snake_case )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__a =[AddedToken(__snake_case , lstrip=__snake_case )]
__a =tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , extra_ids=0 )
self.assertIn(__snake_case , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =self.get_tokenizers(do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__a ='hello world'
if self.space_between_special_tokens:
__a ='[CLS] hello world [SEP]'
else:
__a =input
__a =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
__a =tokenizer.decode(__snake_case , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__snake_case , [output, output.lower()] )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__a =[
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
__a ='a'
__a =ord(__snake_case )
for attr in attributes_list:
setattr(__snake_case , attr + '_id' , __snake_case )
self.assertEqual(getattr(__snake_case , __snake_case ) , __snake_case )
self.assertEqual(getattr(__snake_case , attr + '_id' ) , __snake_case )
setattr(__snake_case , attr + '_id' , __snake_case )
self.assertEqual(getattr(__snake_case , __snake_case ) , __snake_case )
self.assertEqual(getattr(__snake_case , attr + '_id' ) , __snake_case )
setattr(__snake_case , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(__snake_case , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(__snake_case , 'additional_special_tokens_ids' ) , [] )
__a =0Xe006
__a =chr(__snake_case )
setattr(__snake_case , 'additional_special_tokens_ids' , [additional_special_token_id] )
self.assertListEqual(getattr(__snake_case , 'additional_special_tokens' ) , [additional_special_token] )
self.assertListEqual(getattr(__snake_case , 'additional_special_tokens_ids' ) , [additional_special_token_id] )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> int:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> str:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
pass
| 308
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> int:
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=__snake_case , )
assert hasattr(self , 'env' )
def __magic_name__ ( self , __snake_case ) -> int:
'''simple docstring'''
__a =f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
__a ={'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__snake_case , instance_count=__snake_case , instance_type=self.instance_type , debugger_hook_config=__snake_case , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__snake_case , py_version='py36' , )
def __magic_name__ ( self , __snake_case ) -> Optional[Any]:
'''simple docstring'''
TrainingJobAnalytics(__snake_case ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def __magic_name__ ( self , __snake_case ) -> Optional[int]:
'''simple docstring'''
# create estimator
__a =self.create_estimator(__snake_case )
# run training
estimator.fit()
# result dataframe
__a =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__a =list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
__a =list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__a =(
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __snake_case )
| 308
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'mgp-str'
def __init__( self , __snake_case=[32, 128] , __snake_case=4 , __snake_case=3 , __snake_case=27 , __snake_case=38 , __snake_case=5_0257 , __snake_case=3_0522 , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=4.0 , __snake_case=True , __snake_case=False , __snake_case=1e-5 , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.0 , __snake_case=False , __snake_case=0.02 , **__snake_case , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**__snake_case )
__a =image_size
__a =patch_size
__a =num_channels
__a =max_token_length
__a =num_character_labels
__a =num_bpe_labels
__a =num_wordpiece_labels
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =mlp_ratio
__a =distilled
__a =layer_norm_eps
__a =drop_rate
__a =qkv_bias
__a =attn_drop_rate
__a =drop_path_rate
__a =output_aa_attentions
__a =initializer_range
| 308
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
_lowerCAmelCase : Optional[Any] = "Hello world! cécé herlolip"
_lowerCAmelCase : str = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def UpperCamelCase_( _snake_case : str , _snake_case : List[Any] ):
"""simple docstring"""
__a =BertAbsConfig(
temp_dir='.' , finetune_bert=_snake_case , large=_snake_case , share_emb=_snake_case , use_bert_emb=_snake_case , encoder='bert' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
__a =torch.load(_snake_case , lambda _snake_case , _snake_case : storage )
__a =AbsSummarizer(_snake_case , torch.device('cpu' ) , _snake_case )
original.eval()
__a =BertAbsSummarizer(_snake_case , torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
__a =BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
__a =tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
__a =tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__a =encoder_input_ids
__a =decoder_input_ids
__a =__a =None
__a =None
__a =__a =None
__a =__a =None
__a =None
# The original model does not apply the geneator layer immediatly but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__a =original(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =original.generator(_snake_case )
__a =new_model(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =new_model.generator(_snake_case )
__a =torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('Maximum absolute difference beween weights: {:.2f}'.format(_snake_case ) )
__a =torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('Maximum absolute difference beween weights: {:.2f}'.format(_snake_case ) )
__a =torch.allclose(_snake_case , _snake_case , atol=1e-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 308
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_lowerCAmelCase : str = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_lowerCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=False , __snake_case=False , __snake_case=False , __snake_case=2 , __snake_case=99 , __snake_case=0 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=2 , __snake_case=0.02 , __snake_case=2 , __snake_case=4 , __snake_case="last" , __snake_case=True , __snake_case=None , __snake_case=0 , ) -> Optional[Any]:
'''simple docstring'''
__a =parent
__a =batch_size
__a =seq_length
__a =is_training
__a =use_input_lengths
__a =use_token_type_ids
__a =use_labels
__a =gelu_activation
__a =sinusoidal_embeddings
__a =causal
__a =asm
__a =n_langs
__a =vocab_size
__a =n_special
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =max_position_embeddings
__a =type_sequence_label_size
__a =initializer_range
__a =num_labels
__a =num_choices
__a =summary_type
__a =use_proj
__a =scope
__a =bos_token_id
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a =random_attention_mask([self.batch_size, self.seq_length] )
__a =None
if self.use_input_lengths:
__a =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__a =None
if self.use_token_type_ids:
__a =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__a =None
__a =None
__a =None
if self.use_labels:
__a =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a =ids_tensor([self.batch_size] , 2 ).float()
__a =ids_tensor([self.batch_size] , self.num_choices )
__a =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> List[Any]:
'''simple docstring'''
__a =XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , lengths=__snake_case , langs=__snake_case )
__a =model(__snake_case , langs=__snake_case )
__a =model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[int]:
'''simple docstring'''
__a =XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Dict:
'''simple docstring'''
__a =XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
__a =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> List[Any]:
'''simple docstring'''
__a =XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
__a =model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((__a) , ) =result_with_labels.to_tuple()
__a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((__a) , ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[Any]:
'''simple docstring'''
__a =XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Any:
'''simple docstring'''
__a =self.num_labels
__a =XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Tuple:
'''simple docstring'''
__a =self.num_choices
__a =XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) =config_and_inputs
__a ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case=False ) -> str:
'''simple docstring'''
__a =super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__a =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
__a =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =XLMModelTester(self )
__a =ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False , __snake_case=1 ) -> Optional[Any]:
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
__a =min_length + idx + 1
__a =min_length + idx + 1
__a =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False , __snake_case=1 ) -> Dict:
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
__a =min_length + idx + 1
__a =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
pass
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a =XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class __magic_name__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(__snake_case )
__a =torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
__a =[
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__a =model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
| 308
| 1
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
_lowerCAmelCase : str = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
_lowerCAmelCase : List[Any] = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def UpperCamelCase_( _snake_case : Any , _snake_case : Dict=False ):
"""simple docstring"""
__a , __a =create_model(
'HTSAT-tiny' , 'roberta' , _snake_case , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=_snake_case , fusion_type='aff_2d' if enable_fusion else None , )
return model, model_cfg
def UpperCamelCase_( _snake_case : Tuple ):
"""simple docstring"""
__a ={}
__a =r'.*sequential.(\d+).*'
__a =r'.*_projection.(\d+).*'
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__a =key.replace(_snake_case , _snake_case )
if re.match(_snake_case , _snake_case ):
# replace sequential layers with list
__a =re.match(_snake_case , _snake_case ).group(1 )
__a =key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(_snake_case )//3}.linear.' )
elif re.match(_snake_case , _snake_case ):
__a =int(re.match(_snake_case , _snake_case ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
__a =1 if projecton_layer == 0 else 2
__a =key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' )
if "audio" and "qkv" in key:
# split qkv into query key and value
__a =value
__a =mixed_qkv.size(0 ) // 3
__a =mixed_qkv[:qkv_dim]
__a =mixed_qkv[qkv_dim : qkv_dim * 2]
__a =mixed_qkv[qkv_dim * 2 :]
__a =query_layer
__a =key_layer
__a =value_layer
else:
__a =value
return model_state_dict
def UpperCamelCase_( _snake_case : int , _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : Dict=False ):
"""simple docstring"""
__a , __a =init_clap(_snake_case , enable_fusion=_snake_case )
clap_model.eval()
__a =clap_model.state_dict()
__a =rename_state_dict(_snake_case )
__a =ClapConfig()
__a =enable_fusion
__a =ClapModel(_snake_case )
# ignore the spectrogram embedding layer
model.load_state_dict(_snake_case , strict=_snake_case )
model.save_pretrained(_snake_case )
transformers_config.save_pretrained(_snake_case )
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 308
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
_lowerCAmelCase : Optional[Any] = numpy.array([0, 0])
_lowerCAmelCase : Dict = numpy.array([0.5, 0.8660254])
_lowerCAmelCase : Any = numpy.array([1, 0])
_lowerCAmelCase : int = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCamelCase_( _snake_case : list[numpy.ndarray] , _snake_case : int ):
"""simple docstring"""
__a =initial_vectors
for _ in range(_snake_case ):
__a =iteration_step(_snake_case )
return vectors
def UpperCamelCase_( _snake_case : list[numpy.ndarray] ):
"""simple docstring"""
__a =[]
for i, start_vector in enumerate(vectors[:-1] ):
__a =vectors[i + 1]
new_vectors.append(_snake_case )
__a =end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def UpperCamelCase_( _snake_case : numpy.ndarray , _snake_case : float ):
"""simple docstring"""
__a =numpy.radians(_snake_case )
__a , __a =numpy.cos(_snake_case ), numpy.sin(_snake_case )
__a =numpy.array(((c, -s), (s, c)) )
return numpy.dot(_snake_case , _snake_case )
def UpperCamelCase_( _snake_case : list[numpy.ndarray] ):
"""simple docstring"""
__a =plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
__a , __a =zip(*_snake_case )
plt.plot(_snake_case , _snake_case )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : List[Any] = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 308
| 1
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_lowerCAmelCase : Dict = get_logger(__name__)
class __magic_name__ :
SCREAMING_SNAKE_CASE = 'dummy_data'
SCREAMING_SNAKE_CASE = 'datasets'
SCREAMING_SNAKE_CASE = False
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case = None , __snake_case = False , __snake_case = True , __snake_case = None , ) -> Tuple:
'''simple docstring'''
__a =0
__a =dataset_name
__a =cache_dir
__a =use_local_dummy_data
__a =config
# download_callbacks take a single url as input
__a =download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__a =load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__a =str(__snake_case )
# to be downloaded
__a =None
__a =None
@property
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
if self._dummy_file is None:
__a =self.download_dummy_data()
return self._dummy_file
@property
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =(
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__a =cached_path(
__snake_case , cache_dir=self.cache_dir , extract_compressed_file=__snake_case , force_extract=__snake_case )
return os.path.join(__snake_case , self.dummy_file_name )
@property
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
if self._bucket_url is None:
__a =hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def __magic_name__ ( self ) -> str:
'''simple docstring'''
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def __magic_name__ ( self , __snake_case , *__snake_case ) -> Tuple:
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__a =self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__a =self.dummy_file_name
# special case when data_url is a dict
if isinstance(__snake_case , __snake_case ):
return self.create_dummy_data_dict(__snake_case , __snake_case )
elif isinstance(__snake_case , (list, tuple) ):
return self.create_dummy_data_list(__snake_case , __snake_case )
else:
return self.create_dummy_data_single(__snake_case , __snake_case )
def __magic_name__ ( self , __snake_case , *__snake_case ) -> int:
'''simple docstring'''
return self.download_and_extract(__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case ) -> Any:
'''simple docstring'''
return self.download_and_extract(__snake_case )
def __magic_name__ ( self , __snake_case , *__snake_case , **__snake_case ) -> Dict:
'''simple docstring'''
return path
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
return {}
def __magic_name__ ( self , __snake_case , __snake_case ) -> List[Any]:
'''simple docstring'''
__a ={}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__snake_case , __snake_case ):
for single_url in single_urls:
download_callback(__snake_case )
else:
__a =single_urls
download_callback(__snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__snake_case , __snake_case ):
__a =[os.path.join(__snake_case , urllib.parse.quote_plus(Path(__snake_case ).name ) ) for x in single_urls]
else:
__a =single_urls
__a =os.path.join(__snake_case , urllib.parse.quote_plus(Path(__snake_case ).name ) )
__a =value
# make sure that values are unique
if all(isinstance(__snake_case , __snake_case ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__a ={key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __magic_name__ ( self , __snake_case , __snake_case ) -> List[Any]:
'''simple docstring'''
__a =[]
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__a =all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , __snake_case ) ) for url in data_url )
__a =all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__a =[data_url[0]] * len(__snake_case )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__a =os.path.join(__snake_case , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(__snake_case )
return dummy_data_list
def __magic_name__ ( self , __snake_case , __snake_case ) -> int:
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(__snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__a =os.path.join(__snake_case , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(__snake_case ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def __magic_name__ ( self , __snake_case ) -> List[str]:
'''simple docstring'''
def _iter_archive_members(__snake_case ):
# this preserves the order of the members inside the ZIP archive
__a =Path(self.dummy_file ).parent
__a =path.relative_to(__snake_case )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__a =zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__snake_case )
__a =Path(__snake_case )
__a =_iter_archive_members(__snake_case ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(__snake_case ).as_posix(), file_path.open('rb' )
def __magic_name__ ( self , __snake_case ) -> Dict:
'''simple docstring'''
if not isinstance(__snake_case , __snake_case ):
__a =[paths]
for path in paths:
if os.path.isfile(__snake_case ):
if os.path.basename(__snake_case ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__snake_case ):
if os.path.basename(__snake_case ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(__snake_case ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(__snake_case , __snake_case )
| 308
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Tuple = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
| 1
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , __snake_case , __snake_case , __snake_case=1024 , __snake_case=1024 , __snake_case=3.6 ) -> Any:
'''simple docstring'''
__a =tokenizer
__a =tokenizer.bos_token_id
__a =dataset
__a =seq_length
__a =seq_length * chars_per_token * num_of_sequences
def __iter__( self ) -> List[Any]:
'''simple docstring'''
__a =iter(self.dataset )
__a =True
while more_examples:
__a , __a =[], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(__snake_case )['content'] )
buffer_len += len(buffer[-1] )
except StopIteration:
__a =False
break
__a =tokenizer(__snake_case , truncation=__snake_case )['input_ids']
__a =[]
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(__snake_case ) , self.seq_length ):
__a =all_token_ids[i : i + self.seq_length]
if len(__snake_case ) == self.seq_length:
yield torch.tensor(__snake_case )
def UpperCamelCase_( _snake_case : Dict ):
"""simple docstring"""
__a ={'streaming': True}
__a =load_dataset(args.dataset_name , split='train' , **_snake_case )
__a =ConstantLengthDataset(_snake_case , _snake_case , seq_length=args.seq_length )
__a =DataLoader(_snake_case , batch_size=args.batch_size )
return eval_dataloader
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
model.eval()
__a =[]
for step, batch in enumerate(_snake_case ):
with torch.no_grad():
__a =model(_snake_case , labels=_snake_case )
__a =outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(_snake_case ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
__a =torch.mean(torch.cat(_snake_case ) )
try:
__a =torch.exp(_snake_case )
except OverflowError:
__a =float('inf' )
return loss.item(), perplexity.item()
# Setup Accelerator
_lowerCAmelCase : str = Accelerator()
# Parse configuration
_lowerCAmelCase : str = HfArgumentParser(EvaluationArguments)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
set_seed(args.seed)
# Logging
_lowerCAmelCase : str = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
_lowerCAmelCase : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_lowerCAmelCase : int = create_dataloader(args)
# Prepare everything with our `accelerator`.
_lowerCAmelCase , _lowerCAmelCase : Any = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 308
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
@staticmethod
def __magic_name__ ( *__snake_case , **__snake_case ) -> List[str]:
'''simple docstring'''
pass
def UpperCamelCase_( _snake_case : Image ):
"""simple docstring"""
__a =hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> Dict:
'''simple docstring'''
__a =DepthEstimationPipeline(model=__snake_case , image_processor=__snake_case )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __magic_name__ ( self , __snake_case , __snake_case ) -> Tuple:
'''simple docstring'''
__a =depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , __snake_case )
import datasets
__a =datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
__a =depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , __snake_case , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
@require_torch
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a ='Intel/dpt-large'
__a =pipeline('depth-estimation' , model=__snake_case )
__a =depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
__a =hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 308
| 1
|
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , __snake_case , __snake_case , __snake_case ) -> Optional[int]:
'''simple docstring'''
__a =dataset
__a =process
__a =params
def __len__( self ) -> Optional[Any]:
'''simple docstring'''
return len(self.dataset )
def __getitem__( self , __snake_case ) -> Any:
'''simple docstring'''
__a =self.dataset[i]
__a =self.process(__snake_case , **self.params )
return processed
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case=None ) -> str:
'''simple docstring'''
__a =loader
__a =infer
__a =params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
__a =None
__a =loader_batch_size
# Internal bookkeeping
__a =None
__a =None
def __len__( self ) -> Dict:
'''simple docstring'''
return len(self.loader )
def __iter__( self ) -> List[Any]:
'''simple docstring'''
__a =iter(self.loader )
return self
def __magic_name__ ( self ) -> int:
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
__a =self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
__a ={}
for k, element in self._loader_batch_data.items():
if isinstance(__snake_case , __snake_case ):
# Convert ModelOutput to tuple first
__a =element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
__a =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
__a =tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(__snake_case , __snake_case ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
__a =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
__a =tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
__a =None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
__a =element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
__a =np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
__a =element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
__a =self._loader_batch_data.__class__(__snake_case )
self._loader_batch_index += 1
return result
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
__a =next(self.iterator )
__a =self.infer(__snake_case , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(__snake_case , torch.Tensor ):
__a =processed
else:
__a =list(processed.keys() )[0]
__a =processed[key]
if isinstance(__snake_case , __snake_case ):
__a =len(__snake_case )
else:
__a =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
__a =observed_batch_size
# Setting internal index to unwrap the batch
__a =processed
__a =0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case=None ) -> Tuple:
'''simple docstring'''
super().__init__(__snake_case , __snake_case , __snake_case )
def __iter__( self ) -> List[str]:
'''simple docstring'''
__a =iter(self.loader )
__a =None
return self
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.subiterator is None:
__a =self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
__a =next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
__a =self.infer(next(self.iterator ) , **self.params )
__a =next(self.subiterator )
return processed
class __magic_name__ ( lowerCAmelCase_ ):
def __iter__( self ) -> Dict:
'''simple docstring'''
__a =iter(self.loader )
return self
def __magic_name__ ( self ) -> int:
'''simple docstring'''
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# its a `is_last` and then just passes it on to the caller.
__a =False
__a =[]
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
__a =self.loader_batch_item()
__a =item.pop('is_last' )
accumulator.append(__snake_case )
if is_last:
return accumulator
while not is_last:
__a =self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(__snake_case , torch.Tensor ):
__a =processed
else:
__a =list(processed.keys() )[0]
__a =processed[key]
if isinstance(__snake_case , __snake_case ):
__a =len(__snake_case )
else:
__a =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
__a =observed_batch_size
__a =processed
__a =0
while self._loader_batch_index < self.loader_batch_size:
__a =self.loader_batch_item()
__a =item.pop('is_last' )
accumulator.append(__snake_case )
if is_last:
return accumulator
else:
__a =processed
__a =item.pop('is_last' )
accumulator.append(__snake_case )
return accumulator
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , __snake_case , __snake_case ) -> Dict:
'''simple docstring'''
__a =dataset
__a =key
def __len__( self ) -> Optional[Any]:
'''simple docstring'''
return len(self.dataset )
def __getitem__( self , __snake_case ) -> Tuple:
'''simple docstring'''
return self.dataset[i][self.key]
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , __snake_case , __snake_case , __snake_case ) -> Tuple:
'''simple docstring'''
__a =dataset
__a =keya
__a =keya
def __len__( self ) -> Optional[int]:
'''simple docstring'''
return len(self.dataset )
def __getitem__( self , __snake_case ) -> Any:
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 308
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
_lowerCAmelCase : Any = "pytorch_model.bin"
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The name of the task to train on.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Random seed for initialization.'} , )
def UpperCamelCase_( _snake_case : int , _snake_case : str , _snake_case : Optional[int] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : List[Any] ):
"""simple docstring"""
__a =datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
__a =dataset.filter(lambda _snake_case : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__a =int(eval_result * len(_snake_case ) )
print(_snake_case )
__a =dataset.sort('probability' , reverse=_snake_case )
__a =dataset.select(range(_snake_case ) )
__a =dataset.remove_columns(['label', 'probability'] )
__a =dataset.rename_column('prediction' , 'label' )
__a =dataset.map(lambda _snake_case : {"label": idalabel[example["label"]]} )
__a =dataset.shuffle(seed=args.seed )
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(_snake_case , index=_snake_case )
else:
dataset.to_json(_snake_case )
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : str , _snake_case : int , _snake_case : Optional[int] , **_snake_case : List[str] ):
"""simple docstring"""
__a =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__a =STModelArguments(model_name_or_path=_snake_case )
__a =STDataArguments(train_file=_snake_case , infer_file=_snake_case )
__a =STTrainingArguments(output_dir=_snake_case )
__a =argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_snake_case ).items():
setattr(_snake_case , _snake_case , _snake_case )
for key, value in kwargs.items():
if hasattr(_snake_case , _snake_case ):
setattr(_snake_case , _snake_case , _snake_case )
# Sanity checks
__a ={}
__a =None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__a =args.train_file
__a =args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__a =args.eval_file
for key in data_files:
__a =data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
__a =extension
else:
assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file`.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
__a =F'{args.output_dir}/self-train_iter-{{}}'.format
__a =data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_snake_case )
os.makedirs(_snake_case , exist_ok=_snake_case )
accelerator.wait_for_everyone()
__a =None
__a =None
__a =0
__a =False
# Show the progress bar
__a =tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__a =data_dir_format(_snake_case )
assert os.path.exists(_snake_case )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__a =os.path.join(_snake_case , 'stage-1' )
__a ={
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_snake_case , _snake_case ):
arguments_dict.update({key: value} )
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , _snake_case )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__a =os.path.join(_snake_case , 'best-checkpoint' )
__a =os.path.join(_snake_case , 'stage-2' )
# Update arguments_dict
__a =model_path
__a =data_files['train']
__a =current_output_dir
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , _snake_case )
__a =iteration
__a =data_dir_format(iteration + 1 )
__a =AutoConfig.from_pretrained(os.path.join(_snake_case , 'best-checkpoint' ) )
__a =config.idalabel
__a =os.path.join(_snake_case , 'eval_results_best-checkpoint.json' )
__a =os.path.join(_snake_case , 'test_results_best-checkpoint.json' )
assert os.path.exists(_snake_case )
with open(_snake_case , 'r' ) as f:
__a =float(json.load(_snake_case )[args.eval_metric] )
__a =os.path.join(_snake_case , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(_snake_case )
# Loading the dataset from local csv or json files.
__a =load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
__a =load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(_snake_case , exist_ok=_snake_case )
shutil.copy(_snake_case , os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(_snake_case ):
shutil.copy(_snake_case , os.path.join(_snake_case , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
accelerator.wait_for_everyone()
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__a =eval_result
if best_iteration is None:
__a =new_iteration
__a =new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__a =new_iteration
__a =new_eval_result
__a =0
else:
if new_eval_result == best_eval_result:
__a =new_iteration
__a =new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__a =True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , _snake_case )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
| 308
| 1
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCamelCase_( _snake_case : str , _snake_case : complex , _snake_case : str = "x" , _snake_case : float = 10**-10 , _snake_case : int = 1 , ):
"""simple docstring"""
__a =symbols(_snake_case )
__a =lambdify(_snake_case , _snake_case )
__a =lambdify(_snake_case , diff(_snake_case , _snake_case ) )
__a =starting_point
while True:
if diff_function(_snake_case ) != 0:
__a =prev_guess - multiplicity * func(_snake_case ) / diff_function(
_snake_case )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
__a =next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}''')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f'''{newton_raphson("exp(x) - 1", 10, precision=0.005)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
| 308
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
_lowerCAmelCase : List[Any] = 256_047
_lowerCAmelCase : Dict = 256_145
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = NllbTokenizer
SCREAMING_SNAKE_CASE = NllbTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = {}
def __magic_name__ ( self ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
__a =tokenizer.tokenize('This is a test' )
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__a =tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a =tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
__a =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_seqaseq:
return
__a =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
__a =[
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
__a =[
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
__a =tokenizer.prepare_seqaseq_batch(
src_texts=__snake_case , tgt_texts=__snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
__a =tokenizer.prepare_seqaseq_batch(
__snake_case , tgt_texts=__snake_case , max_length=3 , return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
__a =tokenizer.prepare_seqaseq_batch(
src_texts=__snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('decoder_input_ids' , __snake_case )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =[AddedToken('<special>' , lstrip=__snake_case )]
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_r.encode('Hey this is a <special> token' )
__a =tokenizer_r.encode('<special>' , add_special_tokens=__snake_case )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
__a =self.tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_p.encode('Hey this is a <special> token' )
__a =tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = 'facebook/nllb-200-distilled-600M'
SCREAMING_SNAKE_CASE = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
SCREAMING_SNAKE_CASE = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def __magic_name__ ( cls ) -> Tuple:
'''simple docstring'''
__a =NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' )
__a =1
return cls
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 25_6057 )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
self.assertIn(__snake_case , self.tokenizer.all_special_ids )
# fmt: off
__a =[RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
__a =self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
__a =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertNotIn(self.tokenizer.eos_token , __snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , __snake_case )
__a =10
__a =self.tokenizer(__snake_case , max_length=__snake_case , truncation=__snake_case ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __snake_case )
self.assertEqual(len(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_6203, 3] )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =tempfile.mkdtemp()
__a =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__snake_case )
__a =NllbTokenizer.from_pretrained(__snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __snake_case )
@require_torch
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
__a =shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__a =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
self.assertEqual(__snake_case , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =self.tokenizer(self.src_text , padding=__snake_case , truncation=__snake_case , max_length=3 , return_tensors='pt' )
__a =self.tokenizer(
text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=10 , return_tensors='pt' )
__a =targets['input_ids']
__a =shift_tokens_right(
__snake_case , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(__snake_case ) , {
# A, test, EOS, en_XX
'input_ids': [[25_6047, 70, 7356, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_6057,
} , )
@require_torch
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =True
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
__a =False
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
| 308
| 1
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_lowerCAmelCase : Union[str, Any] = 250_004
_lowerCAmelCase : List[str] = 250_020
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = MBartaaTokenizer
SCREAMING_SNAKE_CASE = MBartaaTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a =MBartaaTokenizer(__snake_case , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a ='<s>'
__a =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(__snake_case ) , 1054 )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =MBartaaTokenizer(__snake_case , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=__snake_case )
__a =tokenizer.tokenize('This is a test' )
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
__a =tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a =tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
# fmt: off
__a ={'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__a =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
__a =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = 'facebook/mbart-large-50-one-to-many-mmt'
SCREAMING_SNAKE_CASE = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
SCREAMING_SNAKE_CASE = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
def __magic_name__ ( cls ) -> Optional[Any]:
'''simple docstring'''
__a =MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
__a =1
return cls
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 )
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
self.assertIn(__snake_case , self.tokenizer.all_special_ids )
__a =[RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
__a =self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
__a =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertNotIn(self.tokenizer.eos_token , __snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , __snake_case )
__a =10
__a =self.tokenizer(__snake_case , max_length=__snake_case , truncation=__snake_case ).input_ids[0]
self.assertEqual(ids[0] , __snake_case )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =tempfile.mkdtemp()
__a =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__snake_case )
__a =MBartaaTokenizer.from_pretrained(__snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __snake_case )
@require_torch
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__snake_case , return_tensors='pt' )
__a =shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
__a =shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__a =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.tokenizer(self.src_text , padding=__snake_case , truncation=__snake_case , max_length=3 , return_tensors='pt' )
__a =self.tokenizer(
text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=10 , return_tensors='pt' )
__a =targets['input_ids']
__a =shift_tokens_right(__snake_case , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(__snake_case ) , {
# en_XX, A, test, EOS
'input_ids': [[25_0004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_0001,
} , )
| 308
|
def UpperCamelCase_( _snake_case : str , _snake_case : int ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(_snake_case ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 308
| 1
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def UpperCamelCase_( ):
"""simple docstring"""
__a ='https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
__a =Image.open(requests.get(_snake_case , stream=_snake_case ).raw ).convert('RGB' )
return image
def UpperCamelCase_( _snake_case : Dict ):
"""simple docstring"""
__a =[]
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def UpperCamelCase_( _snake_case : Optional[int] , _snake_case : Dict , _snake_case : List[Any] ):
"""simple docstring"""
__a =dct.pop(_snake_case )
__a =val
def UpperCamelCase_( _snake_case : int , _snake_case : Any ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__a =state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
__a =state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
__a =torch.cat((q_bias, torch.zeros_like(_snake_case , requires_grad=_snake_case ), v_bias) )
__a =qkv_bias
def UpperCamelCase_( _snake_case : Dict , _snake_case : Union[str, Any] ):
"""simple docstring"""
__a =364 if 'coco' in model_name else 224
__a =BlipaVisionConfig(image_size=_snake_case ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__a =OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_snake_case ).to_dict()
elif "opt-6.7b" in model_name:
__a =OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_snake_case ).to_dict()
elif "t5-xl" in model_name:
__a =TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__a =TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
__a =BlipaConfig(vision_config=_snake_case , text_config=_snake_case )
return config, image_size
@torch.no_grad()
def UpperCamelCase_( _snake_case : List[str] , _snake_case : Union[str, Any]=None , _snake_case : Optional[int]=False ):
"""simple docstring"""
__a =(
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
__a =tokenizer('\n' , add_special_tokens=_snake_case ).input_ids[0]
__a , __a =get_blipa_config(_snake_case , eos_token_id=_snake_case )
__a =BlipaForConditionalGeneration(_snake_case ).eval()
__a ={
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
__a , __a =model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__a ='cuda' if torch.cuda.is_available() else 'cpu'
__a , __a , __a =load_model_and_preprocess(
name=_snake_case , model_type=_snake_case , is_eval=_snake_case , device=_snake_case )
original_model.eval()
print('Done!' )
# update state dict keys
__a =original_model.state_dict()
__a =create_rename_keys(_snake_case )
for src, dest in rename_keys:
rename_key(_snake_case , _snake_case , _snake_case )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__a =state_dict.pop(_snake_case )
if key.startswith('Qformer.bert' ):
__a =key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
__a =key.replace('self' , 'attention' )
if "opt_proj" in key:
__a =key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
__a =key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
__a =key.replace('opt' , 'language' )
if key.startswith('t5' ):
__a =key.replace('t5' , 'language' )
__a =val
# read in qv biases
read_in_q_v_bias(_snake_case , _snake_case )
__a , __a =hf_model.load_state_dict(_snake_case , strict=_snake_case )
assert len(_snake_case ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__a =load_demo_image()
__a =vis_processors['eval'](_snake_case ).unsqueeze(0 ).to(_snake_case )
__a =tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_snake_case )
# create processor
__a =BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_snake_case , image_std=_snake_case )
__a =BlipaProcessor(image_processor=_snake_case , tokenizer=_snake_case )
__a =processor(images=_snake_case , return_tensors='pt' ).pixel_values.to(_snake_case )
# make sure processor creates exact same pixel values
assert torch.allclose(_snake_case , _snake_case )
original_model.to(_snake_case )
hf_model.to(_snake_case )
with torch.no_grad():
if "opt" in model_name:
__a =original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
__a =hf_model(_snake_case , _snake_case ).logits
else:
__a =original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
__a =input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
__a =hf_model(_snake_case , _snake_case , labels=_snake_case ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__a =torch.tensor(
[[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=_snake_case )
assert torch.allclose(logits[0, :3, :3] , _snake_case , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__a =torch.tensor(
[[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=_snake_case )
else:
# cast to same type
__a =logits.dtype
assert torch.allclose(original_logits.to(_snake_case ) , _snake_case , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
__a =''
__a =tokenizer(_snake_case , return_tensors='pt' ).input_ids.to(_snake_case )
__a =original_model.generate({'image': original_pixel_values} )
__a =hf_model.generate(
_snake_case , _snake_case , do_sample=_snake_case , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , _snake_case )
__a =input_ids.shape[1]
__a =processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_snake_case )
__a =[text.strip() for text in output_text]
print('HF generation:' , _snake_case )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_snake_case )
hf_model.save_pretrained(_snake_case )
if push_to_hub:
processor.push_to_hub(F'nielsr/{model_name}' )
hf_model.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
_lowerCAmelCase : List[Any] = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
_lowerCAmelCase : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 308
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class __magic_name__ ( pl.LightningModule ):
def __init__( self , __snake_case ) -> List[Any]:
'''simple docstring'''
super().__init__()
__a =model
__a =2
__a =nn.Linear(self.model.config.hidden_size , self.num_labels )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCamelCase_( _snake_case : str , _snake_case : str , _snake_case : str ):
"""simple docstring"""
__a =LongformerModel.from_pretrained(_snake_case )
__a =LightningModel(_snake_case )
__a =torch.load(_snake_case , map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
__a =LongformerForQuestionAnswering.from_pretrained(_snake_case )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_snake_case )
print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 308
| 1
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase_( _snake_case : Any , _snake_case : Optional[int] ):
"""simple docstring"""
__a =old_name
if "patch_embed" in old_name:
__a , __a , __a =old_name.split('.' )
if layer == "0":
__a =old_name.replace('0' , 'convolution1' )
elif layer == "1":
__a =old_name.replace('1' , 'batchnorm_before' )
elif layer == "3":
__a =old_name.replace('3' , 'convolution2' )
else:
__a =old_name.replace('4' , 'batchnorm_after' )
if "network" in old_name and re.search(r'\d\.\d' , _snake_case ):
__a =r'\b\d{2}\b'
if bool(re.search(_snake_case , _snake_case ) ):
__a =re.search(r'\d\.\d\d.' , _snake_case ).group()
else:
__a =re.search(r'\d\.\d.' , _snake_case ).group()
if int(match[0] ) < 6:
__a =old_name.replace(_snake_case , '' )
__a =trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] )
__a ='intermediate_stages.' + trimmed_name
else:
__a =old_name.replace(_snake_case , '' )
if int(match[2] ) < num_meta4D_last_stage:
__a =trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] )
else:
__a =str(int(match[2] ) - num_meta4D_last_stage )
__a =trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index )
if "norm1" in old_name:
__a =trimmed_name.replace('norm1' , 'layernorm1' )
elif "norm2" in old_name:
__a =trimmed_name.replace('norm2' , 'layernorm2' )
elif "fc1" in old_name:
__a =trimmed_name.replace('fc1' , 'linear_in' )
elif "fc2" in old_name:
__a =trimmed_name.replace('fc2' , 'linear_out' )
__a ='last_stage.' + trimmed_name
elif "network" in old_name and re.search(r'.\d.' , _snake_case ):
__a =old_name.replace('network' , 'intermediate_stages' )
if "fc" in new_name:
__a =new_name.replace('fc' , 'convolution' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__a =new_name.replace('norm1' , 'batchnorm_before' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__a =new_name.replace('norm2' , 'batchnorm_after' )
if "proj" in new_name:
__a =new_name.replace('proj' , 'projection' )
if "dist_head" in new_name:
__a =new_name.replace('dist_head' , 'distillation_classifier' )
elif "head" in new_name:
__a =new_name.replace('head' , 'classifier' )
elif "patch_embed" in new_name:
__a ='efficientformer.' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__a =new_name.replace('norm' , 'layernorm' )
__a ='efficientformer.' + new_name
else:
__a ='efficientformer.encoder.' + new_name
return new_name
def UpperCamelCase_( _snake_case : Optional[int] , _snake_case : List[str] ):
"""simple docstring"""
for key in checkpoint.copy().keys():
__a =checkpoint.pop(_snake_case )
__a =val
return checkpoint
def UpperCamelCase_( ):
"""simple docstring"""
__a ='http://images.cocodataset.org/val2017/000000039769.jpg'
__a =Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
return image
def UpperCamelCase_( _snake_case : Path , _snake_case : Path , _snake_case : Path , _snake_case : bool ):
"""simple docstring"""
__a =torch.load(_snake_case , map_location='cpu' )['model']
__a =EfficientFormerConfig.from_json_file(_snake_case )
__a =EfficientFormerForImageClassificationWithTeacher(_snake_case )
__a ='_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] )
__a =config.depths[-1] - config.num_metaad_blocks + 1
__a =convert_torch_checkpoint(_snake_case , _snake_case )
model.load_state_dict(_snake_case )
model.eval()
__a ={
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
# prepare image
__a =prepare_img()
__a =256
__a =224
__a =EfficientFormerImageProcessor(
size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
__a =processor(images=_snake_case , return_tensors='pt' ).pixel_values
# original processing pipeline
__a =Compose(
[
Resize(_snake_case , interpolation=pillow_resamplings['bicubic'] ),
CenterCrop(_snake_case ),
ToTensor(),
Normalize(_snake_case , _snake_case ),
] )
__a =image_transforms(_snake_case ).unsqueeze(0 )
assert torch.allclose(_snake_case , _snake_case )
__a =model(_snake_case )
__a =outputs.logits
__a =(1, 1000)
if "l1" in model_name:
__a =torch.Tensor(
[-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] )
assert torch.allclose(logits[0, :10] , _snake_case , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__a =torch.Tensor(
[-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] )
assert torch.allclose(logits[0, :10] , _snake_case , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__a =torch.Tensor(
[-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F'Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7' )
# Save Checkpoints
Path(_snake_case ).mkdir(exist_ok=_snake_case )
model.save_pretrained(_snake_case )
print(F'Checkpoint successfuly converted. Model saved at {pytorch_dump_path}' )
processor.save_pretrained(_snake_case )
print(F'Processor successfuly saved at {pytorch_dump_path}' )
if push_to_hub:
print('Pushing model to the hub...' )
model.push_to_hub(
repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message='Add model' , use_temp_dir=_snake_case , )
processor.push_to_hub(
repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message='Add image processor' , use_temp_dir=_snake_case , )
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 308
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
SCREAMING_SNAKE_CASE = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
SCREAMING_SNAKE_CASE = field(
default=1_0_2_4 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = field(default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the test data.'} )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
__a =self.train_file.split('.' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__a =self.validation_file.split('.' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
SCREAMING_SNAKE_CASE = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def UpperCamelCase_( ):
"""simple docstring"""
__a =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__a , __a , __a =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__a , __a , __a =parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
__a =training_args.get_process_log_level()
logger.setLevel(_snake_case )
datasets.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__a =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__a =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__a =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__a ={'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__a =data_args.train_file.split('.' )[-1]
__a =data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__a =data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
__a =load_dataset('csv' , data_files=_snake_case , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__a =load_dataset('json' , data_files=_snake_case , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__a =raw_datasets['train'].features['label'].names
__a =len(_snake_case )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__a =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__a =TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_snake_case , )
__a =BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__a ='max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__a =False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__a ={'Refused': 0, 'Entailed': 1}
__a ={0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
__a =min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_snake_case : Tuple ):
# Tokenize the texts
def _convert_table_text_to_pandas(_snake_case : Optional[Any] ):
__a =[_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
__a =pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__a =examples['statement']
__a =list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
__a =tokenizer(_snake_case , _snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case )
__a =examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
__a =raw_datasets.map(
_snake_case , batched=_snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__a =raw_datasets['train']
if data_args.max_train_samples is not None:
__a =train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__a =raw_datasets['validation']
if data_args.max_eval_samples is not None:
__a =eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
__a =raw_datasets['test']
if data_args.max_predict_samples is not None:
__a =predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_snake_case ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_snake_case : EvalPrediction ):
__a =p.predictions[0] if isinstance(p.predictions , _snake_case ) else p.predictions
__a =np.argmax(_snake_case , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__a =default_data_collator
elif training_args.fpaa:
__a =DataCollatorWithPadding(_snake_case , pad_to_multiple_of=8 )
else:
__a =None
# Initialize our Trainer
__a =Trainer(
model=_snake_case , args=_snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_snake_case , tokenizer=_snake_case , data_collator=_snake_case , )
# Training
if training_args.do_train:
__a =None
if training_args.resume_from_checkpoint is not None:
__a =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a =last_checkpoint
__a =trainer.train(resume_from_checkpoint=_snake_case )
__a =train_result.metrics
__a =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(_snake_case )
)
__a =min(_snake_case , len(_snake_case ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , _snake_case )
trainer.save_metrics('train' , _snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__a =trainer.evaluate(eval_dataset=_snake_case )
__a =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_snake_case )
__a =min(_snake_case , len(_snake_case ) )
trainer.log_metrics('eval' , _snake_case )
trainer.save_metrics('eval' , _snake_case )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
__a =predict_dataset.remove_columns('label' )
__a =trainer.predict(_snake_case , metric_key_prefix='predict' ).predictions
__a =np.argmax(_snake_case , axis=1 )
__a =os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(_snake_case , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(_snake_case ):
__a =label_list[item]
writer.write(F'{index}\t{item}\n' )
__a ={'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**_snake_case )
else:
trainer.create_model_card(**_snake_case )
def UpperCamelCase_( _snake_case : Union[str, Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 308
| 1
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_lowerCAmelCase : Any = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , *__snake_case , **__snake_case ) -> None:
'''simple docstring'''
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 308
|
from __future__ import annotations
import time
import numpy as np
_lowerCAmelCase : List[str] = [8, 5, 9, 7]
_lowerCAmelCase : List[str] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_lowerCAmelCase : List[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __magic_name__ :
def __init__( self , __snake_case , __snake_case , __snake_case , ) -> None:
'''simple docstring'''
__a =claim_vector
__a =allocated_resources_table
__a =maximum_claim_table
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __magic_name__ ( self ) -> list[list[int]]:
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__snake_case ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __magic_name__ ( self ) -> dict[int, list[int]]:
'''simple docstring'''
return {self.__need().index(__snake_case ): i for i in self.__need()}
def __magic_name__ ( self , **__snake_case ) -> None:
'''simple docstring'''
__a =self.__need()
__a =self.__allocated_resources_table
__a =self.__available_resources()
__a =self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
__a =False
for each_need in need_list:
__a =True
for index, need in enumerate(__snake_case ):
if need > available_resources[index]:
__a =False
break
if execution:
__a =True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__a =original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(__snake_case )
# update available/freed resources stack
__a =np.array(__snake_case ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(__snake_case ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(__snake_case ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(__snake_case ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(__snake_case ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(__snake_case ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
| 1
|
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
__a =abs(_snake_case )
__a =0
while n > 0:
res += n % 10
n //= 10
return res
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
__a =abs(_snake_case )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
return sum(int(_snake_case ) for c in str(abs(_snake_case ) ) )
def UpperCamelCase_( ):
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_snake_case : Callable , _snake_case : int ) -> None:
__a =F'{func.__name__}({value})'
__a =timeit(F'__main__.{call}' , setup='import __main__' )
print(F'{call:56} = {func(_snake_case )} -- {timing:.4f} seconds' )
for value in (262144, 1125899906842624, 1267650600228229401496703205376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_snake_case , _snake_case )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 308
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_lowerCAmelCase : Tuple = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
_lowerCAmelCase : Optional[int] = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
_lowerCAmelCase : Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a ={letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def UpperCamelCase_( _snake_case : tuple ):
"""simple docstring"""
return x[0]
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_letter_count(_snake_case )
__a ={
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(_snake_case )
__a ={}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_snake_case )
__a =''.join(freq_to_letter[freq] )
__a =list(freq_to_letter_str.items() )
freq_pairs.sort(key=_snake_case , reverse=_snake_case )
__a =[freq_pair[1] for freq_pair in freq_pairs]
return "".join(_snake_case )
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_frequency_order(_snake_case )
__a =0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
| 1
|
from random import shuffle
import tensorflow as tf
from numpy import array
def UpperCamelCase_( _snake_case : Optional[int] , _snake_case : Optional[int] ):
"""simple docstring"""
__a =int(_snake_case )
assert noofclusters < len(_snake_case )
# Find out the dimensionality
__a =len(vectors[0] )
# Will help select random centroids from among the available vectors
__a =list(range(len(_snake_case ) ) )
shuffle(_snake_case )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__a =tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__a =tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__a =[
tf.Variable(vectors[vector_indices[i]] ) for i in range(_snake_case )
]
##These nodes will assign the centroid Variables the appropriate
##values
__a =tf.placeholder('float64' , [dim] )
__a =[]
for centroid in centroids:
cent_assigns.append(tf.assign(_snake_case , _snake_case ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
__a =[tf.Variable(0 ) for i in range(len(_snake_case ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__a =tf.placeholder('int32' )
__a =[]
for assignment in assignments:
cluster_assigns.append(tf.assign(_snake_case , _snake_case ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
__a =tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__a =tf.reduce_mean(_snake_case , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__a =tf.placeholder('float' , [dim] )
__a =tf.placeholder('float' , [dim] )
__a =tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_snake_case , _snake_case ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__a =tf.placeholder('float' , [noofclusters] )
__a =tf.argmin(_snake_case , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
__a =tf.initialize_all_variables()
# Initialize all variables
sess.run(_snake_case )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__a =100
for _ in range(_snake_case ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_snake_case ) ):
__a =vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
__a =[
sess.run(_snake_case , feed_dict={va: vect, va: sess.run(_snake_case )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__a =sess.run(
_snake_case , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_snake_case ):
# Collect all the vectors assigned to this cluster
__a =[
vectors[i]
for i in range(len(_snake_case ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__a =sess.run(
_snake_case , feed_dict={mean_input: array(_snake_case )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__a =sess.run(_snake_case )
__a =sess.run(_snake_case )
return centroids, assignments
| 308
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : int = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'swin2sr'
SCREAMING_SNAKE_CASE = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __snake_case=64 , __snake_case=1 , __snake_case=3 , __snake_case=180 , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=8 , __snake_case=2.0 , __snake_case=True , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case="gelu" , __snake_case=False , __snake_case=0.02 , __snake_case=1e-5 , __snake_case=2 , __snake_case=1.0 , __snake_case="1conv" , __snake_case="pixelshuffle" , **__snake_case , ) -> Dict:
'''simple docstring'''
super().__init__(**__snake_case )
__a =image_size
__a =patch_size
__a =num_channels
__a =embed_dim
__a =depths
__a =len(__snake_case )
__a =num_heads
__a =window_size
__a =mlp_ratio
__a =qkv_bias
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =drop_path_rate
__a =hidden_act
__a =use_absolute_embeddings
__a =layer_norm_eps
__a =initializer_range
__a =upscale
__a =img_range
__a =resi_connection
__a =upsampler
| 308
| 1
|
from functools import lru_cache
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
__a =2
__a =set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(_snake_case )
if n > 1:
factors.add(_snake_case )
return factors
@lru_cache
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
return len(unique_prime_factors(_snake_case ) )
def UpperCamelCase_( _snake_case : list ):
"""simple docstring"""
return len(set(_snake_case ) ) in (0, 1)
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
__a =2
while True:
# Increment each value of a generated range
__a =[base + i for i in range(_snake_case )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
__a =[upf_len(_snake_case ) for x in group]
checker.append(_snake_case )
# If all numbers in the list are equal, return the group variable.
if equality(_snake_case ):
return group
# Increment our base variable by 1
base += 1
def UpperCamelCase_( _snake_case : int = 4 ):
"""simple docstring"""
__a =run(_snake_case )
return results[0] if len(_snake_case ) else None
if __name__ == "__main__":
print(solution())
| 308
|
import os
def UpperCamelCase_( _snake_case : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_snake_case ) , _snake_case ) ) as input_file:
__a =[
[int(_snake_case ) for element in line.split(',' )]
for line in input_file.readlines()
]
__a =len(_snake_case )
__a =len(matrix[0] )
__a =[[-1 for _ in range(_snake_case )] for _ in range(_snake_case )]
for i in range(_snake_case ):
__a =matrix[i][0]
for j in range(1 , _snake_case ):
for i in range(_snake_case ):
__a =minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _snake_case ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
| 1
|
def UpperCamelCase_( ):
"""simple docstring"""
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
for b in range(_snake_case , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_lowerCAmelCase : Any = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , *__snake_case , **__snake_case ) -> None:
'''simple docstring'''
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 308
| 1
|
from collections.abc import Callable
class __magic_name__ :
def __init__( self , __snake_case = None ) -> None:
'''simple docstring'''
# Stores actual heap items.
__a =[]
# Stores indexes of each item for supporting updates and deletion.
__a ={}
# Stores current size of heap.
__a =0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
__a =key or (lambda __snake_case : x)
def __magic_name__ ( self , __snake_case ) -> int | None:
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __magic_name__ ( self , __snake_case ) -> int | None:
'''simple docstring'''
__a =int(2 * i + 1 )
return left if 0 < left < self.size else None
def __magic_name__ ( self , __snake_case ) -> int | None:
'''simple docstring'''
__a =int(2 * i + 2 )
return right if 0 < right < self.size else None
def __magic_name__ ( self , __snake_case , __snake_case ) -> None:
'''simple docstring'''
__a , __a =(
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
__a , __a =self.arr[j], self.arr[i]
def __magic_name__ ( self , __snake_case , __snake_case ) -> bool:
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __magic_name__ ( self , __snake_case ) -> int:
'''simple docstring'''
__a =self._left(__snake_case )
__a =self._right(__snake_case )
__a =i
if left is not None and not self._cmp(__snake_case , __snake_case ):
__a =left
if right is not None and not self._cmp(__snake_case , __snake_case ):
__a =right
return valid_parent
def __magic_name__ ( self , __snake_case ) -> None:
'''simple docstring'''
__a =self._parent(__snake_case )
while parent is not None and not self._cmp(__snake_case , __snake_case ):
self._swap(__snake_case , __snake_case )
__a , __a =parent, self._parent(__snake_case )
def __magic_name__ ( self , __snake_case ) -> None:
'''simple docstring'''
__a =self._get_valid_parent(__snake_case )
while valid_parent != index:
self._swap(__snake_case , __snake_case )
__a , __a =valid_parent, self._get_valid_parent(__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case ) -> None:
'''simple docstring'''
if item not in self.pos_map:
return
__a =self.pos_map[item]
__a =[item, self.key(__snake_case )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(__snake_case )
self._heapify_down(__snake_case )
def __magic_name__ ( self , __snake_case ) -> None:
'''simple docstring'''
if item not in self.pos_map:
return
__a =self.pos_map[item]
del self.pos_map[item]
__a =self.arr[self.size - 1]
__a =index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(__snake_case )
self._heapify_down(__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case ) -> None:
'''simple docstring'''
__a =len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(__snake_case )] )
else:
__a =[item, self.key(__snake_case )]
__a =self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __magic_name__ ( self ) -> tuple | None:
'''simple docstring'''
return self.arr[0] if self.size else None
def __magic_name__ ( self ) -> tuple | None:
'''simple docstring'''
__a =self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def UpperCamelCase_( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = ['pixel_values']
def __init__( self , __snake_case = True , __snake_case = None , __snake_case = PILImageResampling.BICUBIC , __snake_case = True , __snake_case = None , __snake_case = True , __snake_case = 1 / 255 , __snake_case = True , __snake_case = None , __snake_case = None , __snake_case = True , **__snake_case , ) -> None:
'''simple docstring'''
super().__init__(**__snake_case )
__a =size if size is not None else {'shortest_edge': 224}
__a =get_size_dict(__snake_case , default_to_square=__snake_case )
__a =crop_size if crop_size is not None else {'height': 224, 'width': 224}
__a =get_size_dict(__snake_case , default_to_square=__snake_case , param_name='crop_size' )
__a =do_resize
__a =size
__a =resample
__a =do_center_crop
__a =crop_size
__a =do_rescale
__a =rescale_factor
__a =do_normalize
__a =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__a =image_std if image_std is not None else OPENAI_CLIP_STD
__a =do_convert_rgb
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case = PILImageResampling.BICUBIC , __snake_case = None , **__snake_case , ) -> np.ndarray:
'''simple docstring'''
__a =get_size_dict(__snake_case , default_to_square=__snake_case )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
__a =get_resize_output_image_size(__snake_case , size=size['shortest_edge'] , default_to_square=__snake_case )
return resize(__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case = None , **__snake_case , ) -> np.ndarray:
'''simple docstring'''
__a =get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(__snake_case , size=(size['height'], size['width']) , data_format=__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case = None , **__snake_case , ) -> List[Any]:
'''simple docstring'''
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case = None , **__snake_case , ) -> np.ndarray:
'''simple docstring'''
return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = ChannelDimension.FIRST , **__snake_case , ) -> PIL.Image.Image:
'''simple docstring'''
__a =do_resize if do_resize is not None else self.do_resize
__a =size if size is not None else self.size
__a =get_size_dict(__snake_case , param_name='size' , default_to_square=__snake_case )
__a =resample if resample is not None else self.resample
__a =do_center_crop if do_center_crop is not None else self.do_center_crop
__a =crop_size if crop_size is not None else self.crop_size
__a =get_size_dict(__snake_case , param_name='crop_size' , default_to_square=__snake_case )
__a =do_rescale if do_rescale is not None else self.do_rescale
__a =rescale_factor if rescale_factor is not None else self.rescale_factor
__a =do_normalize if do_normalize is not None else self.do_normalize
__a =image_mean if image_mean is not None else self.image_mean
__a =image_std if image_std is not None else self.image_std
__a =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__a =make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__a =[convert_to_rgb(__snake_case ) for image in images]
# All transformations expect numpy arrays.
__a =[to_numpy_array(__snake_case ) for image in images]
if do_resize:
__a =[self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images]
if do_center_crop:
__a =[self.center_crop(image=__snake_case , size=__snake_case ) for image in images]
if do_rescale:
__a =[self.rescale(image=__snake_case , scale=__snake_case ) for image in images]
if do_normalize:
__a =[self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images]
__a =[to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
__a ={'pixel_values': images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
| 308
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'yolos'
def __init__( self , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.02 , __snake_case=1e-12 , __snake_case=[512, 864] , __snake_case=16 , __snake_case=3 , __snake_case=True , __snake_case=100 , __snake_case=True , __snake_case=False , __snake_case=1 , __snake_case=5 , __snake_case=2 , __snake_case=5 , __snake_case=2 , __snake_case=0.1 , **__snake_case , ) -> str:
'''simple docstring'''
super().__init__(**__snake_case )
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =intermediate_size
__a =hidden_act
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =initializer_range
__a =layer_norm_eps
__a =image_size
__a =patch_size
__a =num_channels
__a =qkv_bias
__a =num_detection_tokens
__a =use_mid_position_embeddings
__a =auxiliary_loss
# Hungarian matcher
__a =class_cost
__a =bbox_cost
__a =giou_cost
# Loss coefficients
__a =bbox_loss_coefficient
__a =giou_loss_coefficient
__a =eos_coefficient
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = version.parse('1.11' )
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __magic_name__ ( self ) -> float:
'''simple docstring'''
return 1e-4
@property
def __magic_name__ ( self ) -> int:
'''simple docstring'''
return 12
| 308
| 1
|
from math import sqrt
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
__a =0
for i in range(1 , int(sqrt(_snake_case ) + 1 ) ):
if n % i == 0 and i != sqrt(_snake_case ):
total += i + n // i
elif i == sqrt(_snake_case ):
total += i
return total - n
def UpperCamelCase_( _snake_case : int = 10000 ):
"""simple docstring"""
__a =sum(
i
for i in range(1 , _snake_case )
if sum_of_divisors(sum_of_divisors(_snake_case ) ) == i and sum_of_divisors(_snake_case ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 308
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def UpperCamelCase_( _snake_case : Optional[Any] ):
"""simple docstring"""
__a =model.config
__a =DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
__a =MBartConfig(
is_decoder=_snake_case , is_encoder_decoder=_snake_case , add_cross_attention=_snake_case , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=_snake_case , add_final_layer_norm=_snake_case , )
return encoder_config, decoder_config
def UpperCamelCase_( _snake_case : Tuple ):
"""simple docstring"""
if "encoder.model" in name:
__a =name.replace('encoder.model' , 'encoder' )
if "decoder.model" in name:
__a =name.replace('decoder.model' , 'decoder' )
if "patch_embed.proj" in name:
__a =name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__a =name.replace('patch_embed.norm' , 'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
__a ='encoder.' + name
if "attn.proj" in name:
__a =name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "mask" not in name:
__a =name.replace('attn' , 'attention.self' )
if "norm1" in name:
__a =name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__a =name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__a =name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__a =name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
__a ='encoder.layernorm.weight'
if name == "encoder.norm.bias":
__a ='encoder.layernorm.bias'
return name
def UpperCamelCase_( _snake_case : Tuple , _snake_case : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__a =orig_state_dict.pop(_snake_case )
if "qkv" in key:
__a =key.split('.' )
__a =int(key_split[3] )
__a =int(key_split[5] )
__a =model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__a =val[:dim, :]
__a =val[dim : dim * 2, :]
__a =val[-dim:, :]
else:
__a =val[:dim]
__a =val[dim : dim * 2]
__a =val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
__a =val
return orig_state_dict
def UpperCamelCase_( _snake_case : Tuple , _snake_case : Union[str, Any]=None , _snake_case : List[Any]=False ):
"""simple docstring"""
__a =DonutModel.from_pretrained(_snake_case ).eval()
# load HuggingFace model
__a , __a =get_configs(_snake_case )
__a =DonutSwinModel(_snake_case )
__a =MBartForCausalLM(_snake_case )
__a =VisionEncoderDecoderModel(encoder=_snake_case , decoder=_snake_case )
model.eval()
__a =original_model.state_dict()
__a =convert_state_dict(_snake_case , _snake_case )
model.load_state_dict(_snake_case )
# verify results on scanned document
__a =load_dataset('hf-internal-testing/example-documents' )
__a =dataset['test'][0]['image'].convert('RGB' )
__a =XLMRobertaTokenizerFast.from_pretrained(_snake_case , from_slow=_snake_case )
__a =DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
__a =DonutProcessor(_snake_case , _snake_case )
__a =processor(_snake_case , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
__a ='<s_docvqa><s_question>{user_input}</s_question><s_answer>'
__a ='When is the coffee break?'
__a =task_prompt.replace('{user_input}' , _snake_case )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
__a ='<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
__a ='<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
__a ='s_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
__a ='<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
__a ='hello world'
else:
raise ValueError('Model name not supported' )
__a =original_model.decoder.tokenizer(_snake_case , add_special_tokens=_snake_case , return_tensors='pt' )[
'input_ids'
]
__a =original_model.encoder.model.patch_embed(_snake_case )
__a , __a =model.encoder.embeddings(_snake_case )
assert torch.allclose(_snake_case , _snake_case , atol=1e-3 )
# verify encoder hidden states
__a =original_model.encoder(_snake_case )
__a =model.encoder(_snake_case ).last_hidden_state
assert torch.allclose(_snake_case , _snake_case , atol=1e-2 )
# verify decoder hidden states
__a =original_model(_snake_case , _snake_case , _snake_case ).logits
__a =model(_snake_case , decoder_input_ids=_snake_case ).logits
assert torch.allclose(_snake_case , _snake_case , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(_snake_case )
processor.save_pretrained(_snake_case )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 308
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowerCAmelCase : Optional[int] = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
SCREAMING_SNAKE_CASE = False
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
__a =['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
__a =dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__a =['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
__a ={'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__snake_case ) )
def __magic_name__ ( self , **__snake_case ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def __magic_name__ ( self , __snake_case ) -> List[Any]:
'''simple docstring'''
__a ='adapt act apte'
__a ='adapt act apte'
return input_text, output_text
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a ='adapt act apte'
__a =['adapt', 'act', 'ap@@', 'te']
__a =tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =[tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__a =[0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
__a ='I am a small frog.'
__a =tok([src_text] , padding=__snake_case , truncation=__snake_case )['input_ids']
__a =tok.batch_decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
__a ='I am a small frog .'
__a ='.'
__a =tok(__snake_case )['input_ids']
__a =tok(__snake_case )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 308
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = AltDiffusionPipeline
SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
def __magic_name__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
__a =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__a =DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
torch.manual_seed(0 )
__a =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
__a =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
__a =CLIPTextModel(__snake_case )
__a =XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
__a =77
__a ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __magic_name__ ( self , __snake_case , __snake_case=0 ) -> int:
'''simple docstring'''
if str(__snake_case ).startswith('mps' ):
__a =torch.manual_seed(__snake_case )
else:
__a =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__a ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a ='cpu' # ensure determinism for the device-dependent torch.Generator
__a =self.get_dummy_components()
torch.manual_seed(0 )
__a =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
__a =RobertaSeriesModelWithTransformation(__snake_case )
__a =text_encoder
__a =AltDiffusionPipeline(**__snake_case )
__a =alt_pipe.to(__snake_case )
alt_pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_dummy_inputs(__snake_case )
__a ='A photo of an astronaut'
__a =alt_pipe(**__snake_case )
__a =output.images
__a =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a =np.array(
[0.574_8162, 0.6044_7145, 0.4882_1217, 0.5010_0636, 0.543_1185, 0.4576_3683, 0.4965_7696, 0.4813_2733, 0.4757_3093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a ='cpu' # ensure determinism for the device-dependent torch.Generator
__a =self.get_dummy_components()
__a =PNDMScheduler(skip_prk_steps=__snake_case )
torch.manual_seed(0 )
__a =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
__a =RobertaSeriesModelWithTransformation(__snake_case )
__a =text_encoder
__a =AltDiffusionPipeline(**__snake_case )
__a =alt_pipe.to(__snake_case )
alt_pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_dummy_inputs(__snake_case )
__a =alt_pipe(**__snake_case )
__a =output.images
__a =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a =np.array(
[0.5160_5093, 0.570_7241, 0.4736_5507, 0.5057_8886, 0.563_3877, 0.464_2503, 0.518_2081, 0.4876_3484, 0.4908_4237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
# make sure here that pndm scheduler skips prk
__a =AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=__snake_case )
__a =alt_pipe.to(__snake_case )
alt_pipe.set_progress_bar_config(disable=__snake_case )
__a ='A painting of a squirrel eating a burger'
__a =torch.manual_seed(0 )
__a =alt_pipe([prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=20 , output_type='np' )
__a =output.images
__a =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__a =np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' )
__a =AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=__snake_case , safety_checker=__snake_case )
__a =alt_pipe.to(__snake_case )
alt_pipe.set_progress_bar_config(disable=__snake_case )
__a ='A painting of a squirrel eating a burger'
__a =torch.manual_seed(0 )
__a =alt_pipe([prompt] , generator=__snake_case , num_inference_steps=2 , output_type='numpy' )
__a =output.images
__a =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__a =np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 308
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __magic_name__ ( unittest.TestCase , lowerCAmelCase_ ):
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =load_tool('text-to-speech' )
self.tool.setup()
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
__a =self.tool('hey' )
__a =result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
__a =self.tool('hey' )
__a =result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
| 308
| 1
|
from ...processing_utils import ProcessorMixin
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'WhisperFeatureExtractor'
SCREAMING_SNAKE_CASE = 'WhisperTokenizer'
def __init__( self , __snake_case , __snake_case ) -> int:
'''simple docstring'''
super().__init__(__snake_case , __snake_case )
__a =self.feature_extractor
__a =False
def __magic_name__ ( self , __snake_case=None , __snake_case=None , __snake_case=True ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=__snake_case , language=__snake_case , no_timestamps=__snake_case )
def __call__( self , *__snake_case , **__snake_case ) -> int:
'''simple docstring'''
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__snake_case , **__snake_case )
__a =kwargs.pop('audio' , __snake_case )
__a =kwargs.pop('sampling_rate' , __snake_case )
__a =kwargs.pop('text' , __snake_case )
if len(__snake_case ) > 0:
__a =args[0]
__a =args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
__a =self.feature_extractor(__snake_case , *__snake_case , sampling_rate=__snake_case , **__snake_case )
if text is not None:
__a =self.tokenizer(__snake_case , **__snake_case )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__a =encodings['input_ids']
return inputs
def __magic_name__ ( self , *__snake_case , **__snake_case ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def __magic_name__ ( self , *__snake_case , **__snake_case ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case="np" ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.get_prompt_ids(__snake_case , return_tensors=__snake_case )
| 308
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> Optional[Any]:
'''simple docstring'''
__a =()
for resnet, attn in zip(self.resnets , self.attentions ):
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case=True ) -> Optional[int]:
'''simple docstring'''
__a =()
for resnet in self.resnets:
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# there is always at least one resnet
__a =[
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__a =[]
for _ in range(self.num_layers ):
__a =FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
__a =attentions
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[str]:
'''simple docstring'''
__a =self.resnets[0](__snake_case , __snake_case )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
return hidden_states
| 308
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'encoder-decoder'
SCREAMING_SNAKE_CASE = True
def __init__( self , **__snake_case ) -> str:
'''simple docstring'''
super().__init__(**__snake_case )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
__a =kwargs.pop('encoder' )
__a =encoder_config.pop('model_type' )
__a =kwargs.pop('decoder' )
__a =decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
__a =AutoConfig.for_model(__snake_case , **__snake_case )
__a =AutoConfig.for_model(__snake_case , **__snake_case )
__a =True
@classmethod
def __magic_name__ ( cls , __snake_case , __snake_case , **__snake_case ) -> PretrainedConfig:
'''simple docstring'''
logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
__a =True
__a =True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__snake_case )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =copy.deepcopy(self.__dict__ )
__a =self.encoder.to_dict()
__a =self.decoder.to_dict()
__a =self.__class__.model_type
return output
| 308
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> int:
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=__snake_case , )
assert hasattr(self , 'env' )
def __magic_name__ ( self , __snake_case ) -> int:
'''simple docstring'''
__a =f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
__a ={'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__snake_case , instance_count=__snake_case , instance_type=self.instance_type , debugger_hook_config=__snake_case , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__snake_case , py_version='py36' , )
def __magic_name__ ( self , __snake_case ) -> Optional[Any]:
'''simple docstring'''
TrainingJobAnalytics(__snake_case ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def __magic_name__ ( self , __snake_case ) -> Optional[int]:
'''simple docstring'''
# create estimator
__a =self.create_estimator(__snake_case )
# run training
estimator.fit()
# result dataframe
__a =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__a =list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
__a =list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__a =(
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __snake_case )
| 308
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self , __snake_case , __snake_case=7 , __snake_case=3 , __snake_case=30 , __snake_case=400 , __snake_case=True , __snake_case=None , __snake_case=True , __snake_case=[0.5, 0.5, 0.5] , __snake_case=[0.5, 0.5, 0.5] , __snake_case=True , __snake_case=1 / 255 , __snake_case=True , ) -> Tuple:
'''simple docstring'''
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a =size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
__a =parent
__a =batch_size
__a =num_channels
__a =min_resolution
__a =max_resolution
__a =do_resize
__a =size
__a =do_normalize
__a =image_mean
__a =image_std
__a =do_rescale
__a =rescale_factor
__a =do_pad
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __magic_name__ ( self , __snake_case , __snake_case=False ) -> Optional[Any]:
'''simple docstring'''
if not batched:
__a =image_inputs[0]
if isinstance(__snake_case , Image.Image ):
__a , __a =image.size
else:
__a , __a =image.shape[1], image.shape[2]
if w < h:
__a =int(self.size['shortest_edge'] * h / w )
__a =self.size['shortest_edge']
elif w > h:
__a =self.size['shortest_edge']
__a =int(self.size['shortest_edge'] * w / h )
else:
__a =self.size['shortest_edge']
__a =self.size['shortest_edge']
else:
__a =[]
for image in image_inputs:
__a , __a =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a =max(__snake_case , key=lambda __snake_case : item[0] )[0]
__a =max(__snake_case , key=lambda __snake_case : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = DeformableDetrImageProcessor if is_vision_available() else None
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =DeformableDetrImageProcessingTester(self )
@property
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , 'image_mean' ) )
self.assertTrue(hasattr(__snake_case , 'image_std' ) )
self.assertTrue(hasattr(__snake_case , 'do_normalize' ) )
self.assertTrue(hasattr(__snake_case , 'do_resize' ) )
self.assertTrue(hasattr(__snake_case , 'do_rescale' ) )
self.assertTrue(hasattr(__snake_case , 'do_pad' ) )
self.assertTrue(hasattr(__snake_case , 'size' ) )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , __snake_case )
__a =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__snake_case )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , __snake_case )
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
# Initialize image_processing
__a =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
__a =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__a , __a =self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a =self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
__a =image_processing(__snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
# Initialize image_processing
__a =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
__a =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__a , __a =self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a =image_processing(__snake_case , return_tensors='pt' ).pixel_values
__a , __a =self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# Initialize image_processing
__a =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
__a =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__a , __a =self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a =image_processing(__snake_case , return_tensors='pt' ).pixel_values
__a , __a =self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __magic_name__ ( self ) -> str:
'''simple docstring'''
# prepare image and target
__a =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__a =json.loads(f.read() )
__a ={'image_id': 3_9769, 'annotations': target}
# encode them
__a =DeformableDetrImageProcessor()
__a =image_processing(images=__snake_case , annotations=__snake_case , return_tensors='pt' )
# verify pixel values
__a =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , __snake_case )
__a =torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __snake_case , atol=1e-4 ) )
# verify area
__a =torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __snake_case ) )
# verify boxes
__a =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __snake_case )
__a =torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __snake_case , atol=1e-3 ) )
# verify image_id
__a =torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __snake_case ) )
# verify is_crowd
__a =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __snake_case ) )
# verify class_labels
__a =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __snake_case ) )
# verify orig_size
__a =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __snake_case ) )
# verify size
__a =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __snake_case ) )
@slow
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
# prepare image, target and masks_path
__a =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__a =json.loads(f.read() )
__a ={'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
__a =pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__a =DeformableDetrImageProcessor(format='coco_panoptic' )
__a =image_processing(images=__snake_case , annotations=__snake_case , masks_path=__snake_case , return_tensors='pt' )
# verify pixel values
__a =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , __snake_case )
__a =torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __snake_case , atol=1e-4 ) )
# verify area
__a =torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __snake_case ) )
# verify boxes
__a =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __snake_case )
__a =torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __snake_case , atol=1e-3 ) )
# verify image_id
__a =torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __snake_case ) )
# verify is_crowd
__a =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __snake_case ) )
# verify class_labels
__a =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __snake_case ) )
# verify masks
__a =82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __snake_case )
# verify orig_size
__a =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __snake_case ) )
# verify size
__a =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __snake_case ) )
| 308
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
_lowerCAmelCase : Optional[Any] = "Hello world! cécé herlolip"
_lowerCAmelCase : str = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def UpperCamelCase_( _snake_case : str , _snake_case : List[Any] ):
"""simple docstring"""
__a =BertAbsConfig(
temp_dir='.' , finetune_bert=_snake_case , large=_snake_case , share_emb=_snake_case , use_bert_emb=_snake_case , encoder='bert' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
__a =torch.load(_snake_case , lambda _snake_case , _snake_case : storage )
__a =AbsSummarizer(_snake_case , torch.device('cpu' ) , _snake_case )
original.eval()
__a =BertAbsSummarizer(_snake_case , torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
__a =BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
__a =tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
__a =tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__a =encoder_input_ids
__a =decoder_input_ids
__a =__a =None
__a =None
__a =__a =None
__a =__a =None
__a =None
# The original model does not apply the geneator layer immediatly but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__a =original(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =original.generator(_snake_case )
__a =new_model(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =new_model.generator(_snake_case )
__a =torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('Maximum absolute difference beween weights: {:.2f}'.format(_snake_case ) )
__a =torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('Maximum absolute difference beween weights: {:.2f}'.format(_snake_case ) )
__a =torch.allclose(_snake_case , _snake_case , atol=1e-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 308
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self , __snake_case , __snake_case=7 , __snake_case=3 , __snake_case=18 , __snake_case=30 , __snake_case=400 , __snake_case=True , __snake_case=None , __snake_case=True , __snake_case=None , __snake_case=True , ) -> Optional[Any]:
'''simple docstring'''
__a =size if size is not None else {'shortest_edge': 20}
__a =crop_size if crop_size is not None else {'height': 18, 'width': 18}
__a =parent
__a =batch_size
__a =num_channels
__a =image_size
__a =min_resolution
__a =max_resolution
__a =do_resize
__a =size
__a =do_center_crop
__a =crop_size
__a =do_flip_channel_order
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = MobileViTImageProcessor if is_vision_available() else None
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =MobileViTImageProcessingTester(self )
@property
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , 'do_resize' ) )
self.assertTrue(hasattr(__snake_case , 'size' ) )
self.assertTrue(hasattr(__snake_case , 'do_center_crop' ) )
self.assertTrue(hasattr(__snake_case , 'center_crop' ) )
self.assertTrue(hasattr(__snake_case , 'do_flip_channel_order' ) )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
__a =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
# Initialize image_processing
__a =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
__a =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__a =image_processing(__snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
# Initialize image_processing
__a =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
__a =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__a =image_processing(__snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
# Initialize image_processing
__a =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
__a =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__a =image_processing(__snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 308
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=False , __snake_case=False , __snake_case=False , __snake_case=2 , __snake_case=99 , __snake_case=0 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=2 , __snake_case=0.02 , __snake_case=2 , __snake_case=4 , __snake_case="last" , __snake_case=True , __snake_case=None , __snake_case=0 , ) -> Optional[Any]:
'''simple docstring'''
__a =parent
__a =batch_size
__a =seq_length
__a =is_training
__a =use_input_lengths
__a =use_token_type_ids
__a =use_labels
__a =gelu_activation
__a =sinusoidal_embeddings
__a =causal
__a =asm
__a =n_langs
__a =vocab_size
__a =n_special
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =max_position_embeddings
__a =type_sequence_label_size
__a =initializer_range
__a =num_labels
__a =num_choices
__a =summary_type
__a =use_proj
__a =scope
__a =bos_token_id
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a =random_attention_mask([self.batch_size, self.seq_length] )
__a =None
if self.use_input_lengths:
__a =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__a =None
if self.use_token_type_ids:
__a =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__a =None
__a =None
__a =None
if self.use_labels:
__a =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a =ids_tensor([self.batch_size] , 2 ).float()
__a =ids_tensor([self.batch_size] , self.num_choices )
__a =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> List[Any]:
'''simple docstring'''
__a =XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , lengths=__snake_case , langs=__snake_case )
__a =model(__snake_case , langs=__snake_case )
__a =model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[int]:
'''simple docstring'''
__a =XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Dict:
'''simple docstring'''
__a =XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
__a =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> List[Any]:
'''simple docstring'''
__a =XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
__a =model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((__a) , ) =result_with_labels.to_tuple()
__a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((__a) , ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[Any]:
'''simple docstring'''
__a =XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
__a =model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Any:
'''simple docstring'''
__a =self.num_labels
__a =XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Tuple:
'''simple docstring'''
__a =self.num_choices
__a =XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) =config_and_inputs
__a ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case=False ) -> str:
'''simple docstring'''
__a =super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__a =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
__a =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =XLMModelTester(self )
__a =ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False , __snake_case=1 ) -> Optional[Any]:
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
__a =min_length + idx + 1
__a =min_length + idx + 1
__a =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False , __snake_case=1 ) -> Dict:
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
__a =min_length + idx + 1
__a =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
pass
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a =XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class __magic_name__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(__snake_case )
__a =torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
__a =[
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__a =model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
| 308
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : int = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'swin2sr'
SCREAMING_SNAKE_CASE = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __snake_case=64 , __snake_case=1 , __snake_case=3 , __snake_case=180 , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=8 , __snake_case=2.0 , __snake_case=True , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case="gelu" , __snake_case=False , __snake_case=0.02 , __snake_case=1e-5 , __snake_case=2 , __snake_case=1.0 , __snake_case="1conv" , __snake_case="pixelshuffle" , **__snake_case , ) -> Dict:
'''simple docstring'''
super().__init__(**__snake_case )
__a =image_size
__a =patch_size
__a =num_channels
__a =embed_dim
__a =depths
__a =len(__snake_case )
__a =num_heads
__a =window_size
__a =mlp_ratio
__a =qkv_bias
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =drop_path_rate
__a =hidden_act
__a =use_absolute_embeddings
__a =layer_norm_eps
__a =initializer_range
__a =upscale
__a =img_range
__a =resi_connection
__a =upsampler
| 308
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
_lowerCAmelCase : Optional[Any] = numpy.array([0, 0])
_lowerCAmelCase : Dict = numpy.array([0.5, 0.8660254])
_lowerCAmelCase : Any = numpy.array([1, 0])
_lowerCAmelCase : int = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCamelCase_( _snake_case : list[numpy.ndarray] , _snake_case : int ):
"""simple docstring"""
__a =initial_vectors
for _ in range(_snake_case ):
__a =iteration_step(_snake_case )
return vectors
def UpperCamelCase_( _snake_case : list[numpy.ndarray] ):
"""simple docstring"""
__a =[]
for i, start_vector in enumerate(vectors[:-1] ):
__a =vectors[i + 1]
new_vectors.append(_snake_case )
__a =end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def UpperCamelCase_( _snake_case : numpy.ndarray , _snake_case : float ):
"""simple docstring"""
__a =numpy.radians(_snake_case )
__a , __a =numpy.cos(_snake_case ), numpy.sin(_snake_case )
__a =numpy.array(((c, -s), (s, c)) )
return numpy.dot(_snake_case , _snake_case )
def UpperCamelCase_( _snake_case : list[numpy.ndarray] ):
"""simple docstring"""
__a =plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
__a , __a =zip(*_snake_case )
plt.plot(_snake_case , _snake_case )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : List[Any] = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 308
| 1
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = False , ) -> Dict:
'''simple docstring'''
super().__init__()
__a =nn.Embedding(__snake_case , __snake_case )
__a =nn.Embedding(__snake_case , __snake_case )
__a =False
__a =nn.Dropout(p=__snake_case )
__a =TaConfig(
vocab_size=__snake_case , d_model=__snake_case , num_heads=__snake_case , d_kv=__snake_case , d_ff=__snake_case , dropout_rate=__snake_case , feed_forward_proj=__snake_case , is_decoder=__snake_case , is_encoder_decoder=__snake_case , )
__a =nn.ModuleList()
for lyr_num in range(__snake_case ):
__a =TaBlock(__snake_case )
self.encoders.append(__snake_case )
__a =TaLayerNorm(__snake_case )
__a =nn.Dropout(p=__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case ) -> Optional[int]:
'''simple docstring'''
__a =self.token_embedder(__snake_case )
__a =encoder_input_tokens.shape[1]
__a =torch.arange(__snake_case , device=encoder_input_tokens.device )
x += self.position_encoding(__snake_case )
__a =self.dropout_pre(__snake_case )
# inverted the attention mask
__a =encoder_input_tokens.size()
__a =self.get_extended_attention_mask(__snake_case , __snake_case )
for lyr in self.encoders:
__a =lyr(__snake_case , __snake_case )[0]
__a =self.layer_norm(__snake_case )
return self.dropout_post(__snake_case ), encoder_inputs_mask
| 308
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Tuple = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
| 1
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase : Any = 256
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = ['melgan']
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None:
'''simple docstring'''
super().__init__()
# From MELGAN
__a =math.log(1e-5 ) # Matches MelGAN training.
__a =4.0 # Largest value for most examples
__a =128
self.register_modules(
notes_encoder=__snake_case , continuous_encoder=__snake_case , decoder=__snake_case , scheduler=__snake_case , melgan=__snake_case , )
def __magic_name__ ( self , __snake_case , __snake_case=(-1.0, 1.0) , __snake_case=False ) -> Union[str, Any]:
'''simple docstring'''
__a , __a =output_range
if clip:
__a =torch.clip(__snake_case , self.min_value , self.max_value )
# Scale to [0, 1].
__a =(features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __magic_name__ ( self , __snake_case , __snake_case=(-1.0, 1.0) , __snake_case=False ) -> Dict:
'''simple docstring'''
__a , __a =input_range
__a =torch.clip(__snake_case , __snake_case , __snake_case ) if clip else outputs
# Scale to [0, 1].
__a =(outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> Optional[Any]:
'''simple docstring'''
__a =input_tokens > 0
__a , __a =self.notes_encoder(
encoder_input_tokens=__snake_case , encoder_inputs_mask=__snake_case )
__a , __a =self.continuous_encoder(
encoder_inputs=__snake_case , encoder_inputs_mask=__snake_case )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> Any:
'''simple docstring'''
__a =noise_time
if not torch.is_tensor(__snake_case ):
__a =torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(__snake_case ) and len(timesteps.shape ) == 0:
__a =timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__a =timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__a =self.decoder(
encodings_and_masks=__snake_case , decoder_input_tokens=__snake_case , decoder_noise_time=__snake_case )
return logits
@torch.no_grad()
def __call__( self , __snake_case , __snake_case = None , __snake_case = 100 , __snake_case = True , __snake_case = "numpy" , __snake_case = None , __snake_case = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__snake_case , __snake_case ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(__snake_case )}.' )
__a =np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__a =np.zeros([1, 0, self.n_dims] , np.floataa )
__a =torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__snake_case , device=self.device )
for i, encoder_input_tokens in enumerate(__snake_case ):
if i == 0:
__a =torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__a =torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__snake_case , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__a =ones
__a =self.scale_features(
__snake_case , output_range=[-1.0, 1.0] , clip=__snake_case )
__a =self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__snake_case , continuous_mask=__snake_case , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__a =randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__snake_case , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__snake_case )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__a =self.decode(
encodings_and_masks=__snake_case , input_tokens=__snake_case , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__a =self.scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case ).prev_sample
__a =self.scale_to_features(__snake_case , input_range=[-1.0, 1.0] )
__a =mel[:1]
__a =mel.cpu().float().numpy()
__a =np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__snake_case , __snake_case )
logger.info('Generated segment' , __snake_case )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' )
if output_type == "numpy":
__a =self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__a =full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__snake_case )
| 308
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
@staticmethod
def __magic_name__ ( *__snake_case , **__snake_case ) -> List[str]:
'''simple docstring'''
pass
def UpperCamelCase_( _snake_case : Image ):
"""simple docstring"""
__a =hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> Dict:
'''simple docstring'''
__a =DepthEstimationPipeline(model=__snake_case , image_processor=__snake_case )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __magic_name__ ( self , __snake_case , __snake_case ) -> Tuple:
'''simple docstring'''
__a =depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , __snake_case )
import datasets
__a =datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
__a =depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , __snake_case , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
@require_torch
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a ='Intel/dpt-large'
__a =pipeline('depth-estimation' , model=__snake_case )
__a =depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
__a =hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 308
| 1
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def UpperCamelCase_( _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Tuple ):
"""simple docstring"""
__a =os.path.abspath(_snake_case )
logger.info(F'Converting TensorFlow checkpoint from {tf_path}' )
# Load weights from TF model
__a =tf.train.list_variables(_snake_case )
__a =[]
__a =[]
__a =[]
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
__a =full_name.split('/' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'Skipping non-model layer {full_name}' )
continue
if "optimizer" in full_name:
logger.info(F'Skipping optimization layer {full_name}' )
continue
if name[0] == "model":
# ignore initial 'model'
__a =name[1:]
# figure out how many levels deep the name is
__a =0
for _name in name:
if _name.startswith('layer_with_weights' ):
depth += 1
else:
break
layer_depth.append(_snake_case )
# read data
__a =tf.train.load_variable(_snake_case , _snake_case )
names.append('/'.join(_snake_case ) )
arrays.append(_snake_case )
logger.info(F'Read a total of {len(_snake_case ):,} layers' )
# Sanity check
if len(set(_snake_case ) ) != 1:
raise ValueError(F'Found layer names with different depths (layer depth {list(set(_snake_case ) )})' )
__a =list(set(_snake_case ) )[0]
if layer_depth != 1:
raise ValueError(
'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'
' heads.' )
# convert layers
logger.info('Converting weights...' )
for full_name, array in zip(_snake_case , _snake_case ):
__a =full_name.split('/' )
__a =model
__a =[]
for i, m_name in enumerate(_snake_case ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('layer_with_weights' ):
__a =int(m_name.split('-' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['embeddings', 'LayerNorm'] )
__a =getattr(_snake_case , 'embeddings' )
__a =getattr(_snake_case , 'LayerNorm' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['encoder', 'layer', str(layer_num - 4 )] )
__a =getattr(_snake_case , 'encoder' )
__a =getattr(_snake_case , 'layer' )
__a =pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['pooler', 'dense'] )
__a =getattr(_snake_case , 'pooler' )
__a =getattr(_snake_case , 'dense' )
elif m_name == "embeddings":
trace.append('embeddings' )
__a =getattr(_snake_case , 'embeddings' )
if layer_num == 0:
trace.append('word_embeddings' )
__a =getattr(_snake_case , 'word_embeddings' )
elif layer_num == 1:
trace.append('position_embeddings' )
__a =getattr(_snake_case , 'position_embeddings' )
elif layer_num == 2:
trace.append('token_type_embeddings' )
__a =getattr(_snake_case , 'token_type_embeddings' )
else:
raise ValueError(F'Unknown embedding layer with name {full_name}' )
trace.append('weight' )
__a =getattr(_snake_case , 'weight' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['attention', 'self'] )
__a =getattr(_snake_case , 'attention' )
__a =getattr(_snake_case , 'self' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['attention', 'output', 'LayerNorm'] )
__a =getattr(_snake_case , 'attention' )
__a =getattr(_snake_case , 'output' )
__a =getattr(_snake_case , 'LayerNorm' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['attention', 'output', 'dense'] )
__a =getattr(_snake_case , 'attention' )
__a =getattr(_snake_case , 'output' )
__a =getattr(_snake_case , 'dense' )
elif m_name == "_output_dense":
# output dense
trace.extend(['output', 'dense'] )
__a =getattr(_snake_case , 'output' )
__a =getattr(_snake_case , 'dense' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['output', 'LayerNorm'] )
__a =getattr(_snake_case , 'output' )
__a =getattr(_snake_case , 'LayerNorm' )
elif m_name == "_key_dense":
# attention key
trace.append('key' )
__a =getattr(_snake_case , 'key' )
elif m_name == "_query_dense":
# attention query
trace.append('query' )
__a =getattr(_snake_case , 'query' )
elif m_name == "_value_dense":
# attention value
trace.append('value' )
__a =getattr(_snake_case , 'value' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['intermediate', 'dense'] )
__a =getattr(_snake_case , 'intermediate' )
__a =getattr(_snake_case , 'dense' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('output' )
__a =getattr(_snake_case , 'output' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('bias' )
__a =getattr(_snake_case , 'bias' )
elif m_name in ["kernel", "gamma"]:
trace.append('weight' )
__a =getattr(_snake_case , 'weight' )
else:
logger.warning(F'Ignored {m_name}' )
# for certain layers reshape is necessary
__a ='.'.join(_snake_case )
if re.match(r'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)' , _snake_case ) or re.match(
r'(\S+)\.attention\.output\.dense\.weight' , _snake_case ):
__a =array.reshape(pointer.data.shape )
if "kernel" in full_name:
__a =array.transpose()
if pointer.shape == array.shape:
__a =torch.from_numpy(_snake_case )
else:
raise ValueError(
F'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
F' {array.shape}' )
logger.info(F'Successfully set variable {full_name} to PyTorch layer {trace}' )
return model
def UpperCamelCase_( _snake_case : Tuple , _snake_case : int , _snake_case : Optional[int] ):
"""simple docstring"""
logger.info(F'Loading model based on config from {config_path}...' )
__a =BertConfig.from_json_file(_snake_case )
__a =BertModel(_snake_case )
# Load weights from checkpoint
logger.info(F'Loading weights from checkpoint {tf_checkpoint_path}...' )
load_tfa_weights_in_bert(_snake_case , _snake_case , _snake_case )
# Save pytorch-model
logger.info(F'Saving PyTorch model to {pytorch_dump_path}...' )
torch.save(model.state_dict() , _snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 308
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
_lowerCAmelCase : Any = "pytorch_model.bin"
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The name of the task to train on.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Random seed for initialization.'} , )
def UpperCamelCase_( _snake_case : int , _snake_case : str , _snake_case : Optional[int] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : List[Any] ):
"""simple docstring"""
__a =datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
__a =dataset.filter(lambda _snake_case : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__a =int(eval_result * len(_snake_case ) )
print(_snake_case )
__a =dataset.sort('probability' , reverse=_snake_case )
__a =dataset.select(range(_snake_case ) )
__a =dataset.remove_columns(['label', 'probability'] )
__a =dataset.rename_column('prediction' , 'label' )
__a =dataset.map(lambda _snake_case : {"label": idalabel[example["label"]]} )
__a =dataset.shuffle(seed=args.seed )
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(_snake_case , index=_snake_case )
else:
dataset.to_json(_snake_case )
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : str , _snake_case : int , _snake_case : Optional[int] , **_snake_case : List[str] ):
"""simple docstring"""
__a =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__a =STModelArguments(model_name_or_path=_snake_case )
__a =STDataArguments(train_file=_snake_case , infer_file=_snake_case )
__a =STTrainingArguments(output_dir=_snake_case )
__a =argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_snake_case ).items():
setattr(_snake_case , _snake_case , _snake_case )
for key, value in kwargs.items():
if hasattr(_snake_case , _snake_case ):
setattr(_snake_case , _snake_case , _snake_case )
# Sanity checks
__a ={}
__a =None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__a =args.train_file
__a =args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__a =args.eval_file
for key in data_files:
__a =data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
__a =extension
else:
assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file`.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
__a =F'{args.output_dir}/self-train_iter-{{}}'.format
__a =data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_snake_case )
os.makedirs(_snake_case , exist_ok=_snake_case )
accelerator.wait_for_everyone()
__a =None
__a =None
__a =0
__a =False
# Show the progress bar
__a =tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__a =data_dir_format(_snake_case )
assert os.path.exists(_snake_case )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__a =os.path.join(_snake_case , 'stage-1' )
__a ={
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_snake_case , _snake_case ):
arguments_dict.update({key: value} )
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , _snake_case )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__a =os.path.join(_snake_case , 'best-checkpoint' )
__a =os.path.join(_snake_case , 'stage-2' )
# Update arguments_dict
__a =model_path
__a =data_files['train']
__a =current_output_dir
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , _snake_case )
__a =iteration
__a =data_dir_format(iteration + 1 )
__a =AutoConfig.from_pretrained(os.path.join(_snake_case , 'best-checkpoint' ) )
__a =config.idalabel
__a =os.path.join(_snake_case , 'eval_results_best-checkpoint.json' )
__a =os.path.join(_snake_case , 'test_results_best-checkpoint.json' )
assert os.path.exists(_snake_case )
with open(_snake_case , 'r' ) as f:
__a =float(json.load(_snake_case )[args.eval_metric] )
__a =os.path.join(_snake_case , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(_snake_case )
# Loading the dataset from local csv or json files.
__a =load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
__a =load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(_snake_case , exist_ok=_snake_case )
shutil.copy(_snake_case , os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(_snake_case ):
shutil.copy(_snake_case , os.path.join(_snake_case , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
accelerator.wait_for_everyone()
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__a =eval_result
if best_iteration is None:
__a =new_iteration
__a =new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__a =new_iteration
__a =new_eval_result
__a =0
else:
if new_eval_result == best_eval_result:
__a =new_iteration
__a =new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__a =True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , _snake_case )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
| 308
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_lowerCAmelCase : Union[str, Any] = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
_lowerCAmelCase : str = {
"google/bigbird-roberta-base": 4_096,
"google/bigbird-roberta-large": 4_096,
"google/bigbird-base-trivia-itc": 4_096,
}
_lowerCAmelCase : int = "▁"
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = BigBirdTokenizer
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE = []
def __init__( self , __snake_case=None , __snake_case=None , __snake_case="<unk>" , __snake_case="<s>" , __snake_case="</s>" , __snake_case="<pad>" , __snake_case="[SEP]" , __snake_case="[MASK]" , __snake_case="[CLS]" , **__snake_case , ) -> Tuple:
'''simple docstring'''
__a =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else bos_token
__a =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else eos_token
__a =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else unk_token
__a =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else pad_token
__a =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else cls_token
__a =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
__a =AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
super().__init__(
__snake_case , tokenizer_file=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , **__snake_case , )
__a =vocab_file
__a =False if not self.vocab_file else True
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> List[int]:
'''simple docstring'''
__a =[self.sep_token_id]
__a =[self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __magic_name__ ( self , __snake_case , __snake_case = None , __snake_case = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) + [1]
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> List[int]:
'''simple docstring'''
__a =[self.sep_token_id]
__a =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(__snake_case ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__a =os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
return (out_vocab_file,)
| 308
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
_lowerCAmelCase : List[Any] = 256_047
_lowerCAmelCase : Dict = 256_145
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = NllbTokenizer
SCREAMING_SNAKE_CASE = NllbTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = {}
def __magic_name__ ( self ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =NllbTokenizer(__snake_case , keep_accents=__snake_case )
__a =tokenizer.tokenize('This is a test' )
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__a =tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a =tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
__a =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
__a =tempfile.mkdtemp()
__a =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__a =tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__a =tokenizer_r.from_pretrained(__snake_case )
__a =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_seqaseq:
return
__a =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
__a =[
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
__a =[
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
__a =tokenizer.prepare_seqaseq_batch(
src_texts=__snake_case , tgt_texts=__snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
__a =tokenizer.prepare_seqaseq_batch(
__snake_case , tgt_texts=__snake_case , max_length=3 , return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
__a =tokenizer.prepare_seqaseq_batch(
src_texts=__snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('decoder_input_ids' , __snake_case )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =[AddedToken('<special>' , lstrip=__snake_case )]
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_r.encode('Hey this is a <special> token' )
__a =tokenizer_r.encode('<special>' , add_special_tokens=__snake_case )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__a =self.rust_tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
__a =self.tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , **__snake_case )
__a =tokenizer_p.encode('Hey this is a <special> token' )
__a =tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = 'facebook/nllb-200-distilled-600M'
SCREAMING_SNAKE_CASE = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
SCREAMING_SNAKE_CASE = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def __magic_name__ ( cls ) -> Tuple:
'''simple docstring'''
__a =NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' )
__a =1
return cls
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 25_6057 )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
self.assertIn(__snake_case , self.tokenizer.all_special_ids )
# fmt: off
__a =[RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
__a =self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
__a =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertNotIn(self.tokenizer.eos_token , __snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , __snake_case )
__a =10
__a =self.tokenizer(__snake_case , max_length=__snake_case , truncation=__snake_case ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __snake_case )
self.assertEqual(len(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_6203, 3] )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =tempfile.mkdtemp()
__a =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__snake_case )
__a =NllbTokenizer.from_pretrained(__snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __snake_case )
@require_torch
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
__a =shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__a =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
self.assertEqual(__snake_case , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =self.tokenizer(self.src_text , padding=__snake_case , truncation=__snake_case , max_length=3 , return_tensors='pt' )
__a =self.tokenizer(
text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=10 , return_tensors='pt' )
__a =targets['input_ids']
__a =shift_tokens_right(
__snake_case , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(__snake_case ) , {
# A, test, EOS, en_XX
'input_ids': [[25_6047, 70, 7356, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_6057,
} , )
@require_torch
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =True
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
__a =False
__a =self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
| 308
| 1
|
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_lowerCAmelCase : Optional[Any] = "bert-base-cased"
_lowerCAmelCase : Optional[Any] = "google/pegasus-xsum"
_lowerCAmelCase : Tuple = [" Sam ate lunch today.", "Sams lunch ingredients."]
_lowerCAmelCase : List[Any] = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
_lowerCAmelCase : Optional[Any] = "patrickvonplaten/t5-tiny-random"
_lowerCAmelCase : List[Any] = "sshleifer/bart-tiny-random"
_lowerCAmelCase : List[str] = "sshleifer/tiny-mbart"
_lowerCAmelCase : Dict = "sshleifer/tiny-marian-en-de"
def UpperCamelCase_( _snake_case : Path , _snake_case : list ):
"""simple docstring"""
__a ='\n'.join(_snake_case )
Path(_snake_case ).open('w' ).writelines(_snake_case )
def UpperCamelCase_( _snake_case : List[str] ):
"""simple docstring"""
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(_snake_case , F'{split}.source' ) , _snake_case )
_dump_articles(os.path.join(_snake_case , F'{split}.target' ) , _snake_case )
return tmp_dir
class __magic_name__ ( lowerCAmelCase_ ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __magic_name__ ( self , __snake_case ) -> Dict:
'''simple docstring'''
__a =AutoTokenizer.from_pretrained(__snake_case )
__a =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__a =max(len(tokenizer.encode(__snake_case ) ) for a in ARTICLES )
__a =max(len(tokenizer.encode(__snake_case ) ) for a in SUMMARIES )
__a =4
__a =8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__a , __a ='ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
__a =SeqaSeqDataset(
__snake_case , data_dir=__snake_case , type_path='train' , max_source_length=__snake_case , max_target_length=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case , )
__a =DataLoader(__snake_case , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__snake_case , __snake_case )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__a =shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __magic_name__ ( self , __snake_case ) -> int:
'''simple docstring'''
__a =AutoTokenizer.from_pretrained(__snake_case )
__a =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__a =max(len(tokenizer.encode(__snake_case ) ) for a in ARTICLES )
__a =max(len(tokenizer.encode(__snake_case ) ) for a in SUMMARIES )
__a =4
__a =LegacySeqaSeqDataset(
__snake_case , data_dir=__snake_case , type_path='train' , max_source_length=20 , max_target_length=__snake_case , )
__a =DataLoader(__snake_case , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
__a =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__a =tmp_dir.joinpath('train.source' ).open().readlines()
__a =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__snake_case , __snake_case , 128 , __snake_case )
__a ={x.name for x in tmp_dir.iterdir()}
__a ={x.name for x in save_dir.iterdir()}
__a =save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__snake_case ) < len(__snake_case )
assert len(__snake_case ) == 1
assert len(packed_examples[0] ) == sum(len(__snake_case ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
__a , __a , __a =self._get_dataset(max_len=64 )
__a =64
__a =ds.make_dynamic_sampler(__snake_case , required_batch_size_multiple=__snake_case )
__a =[len(__snake_case ) for x in batch_sampler]
assert len(set(__snake_case ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__snake_case ) == len(__snake_case ) # no dropped or added examples
__a =DataLoader(__snake_case , batch_sampler=__snake_case , collate_fn=ds.collate_fn , num_workers=2 )
__a =[]
__a =[]
for batch in data_loader:
__a =batch['input_ids'].shape
__a =src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__a =np.product(batch['input_ids'].shape )
num_src_per_batch.append(__snake_case )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__snake_case )
assert num_src_per_batch[0] == max(__snake_case )
if failures:
raise AssertionError(f'too many tokens in {len(__snake_case )} batches' )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a , __a , __a =self._get_dataset(max_len=512 )
__a =2
__a =ds.make_sortish_sampler(__snake_case , shuffle=__snake_case )
__a =DataLoader(__snake_case , batch_size=__snake_case , collate_fn=ds.collate_fn , num_workers=2 )
__a =DataLoader(__snake_case , batch_size=__snake_case , collate_fn=ds.collate_fn , num_workers=2 , sampler=__snake_case )
__a =tokenizer.pad_token_id
def count_pad_tokens(__snake_case , __snake_case="input_ids" ):
return [batch[k].eq(__snake_case ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__snake_case , k='labels' ) ) < sum(count_pad_tokens(__snake_case , k='labels' ) )
assert sum(count_pad_tokens(__snake_case ) ) < sum(count_pad_tokens(__snake_case ) )
assert len(__snake_case ) == len(__snake_case )
def __magic_name__ ( self , __snake_case=1000 , __snake_case=128 ) -> Optional[Any]:
'''simple docstring'''
if os.getenv('USE_REAL_DATA' , __snake_case ):
__a ='examples/seq2seq/wmt_en_ro'
__a =max_len * 2 * 64
if not Path(__snake_case ).joinpath('train.len' ).exists():
save_len_file(__snake_case , __snake_case )
else:
__a ='examples/seq2seq/test_data/wmt_en_ro'
__a =max_len * 4
save_len_file(__snake_case , __snake_case )
__a =AutoTokenizer.from_pretrained(__snake_case )
__a =SeqaSeqDataset(
__snake_case , data_dir=__snake_case , type_path='train' , max_source_length=__snake_case , max_target_length=__snake_case , n_obs=__snake_case , )
return ds, max_tokens, tokenizer
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a , __a , __a =self._get_dataset()
__a =set(DistributedSortishSampler(__snake_case , 256 , num_replicas=2 , rank=0 , add_extra_examples=__snake_case ) )
__a =set(DistributedSortishSampler(__snake_case , 256 , num_replicas=2 , rank=1 , add_extra_examples=__snake_case ) )
assert idsa.intersection(__snake_case ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __magic_name__ ( self , __snake_case ) -> Any:
'''simple docstring'''
__a =AutoTokenizer.from_pretrained(__snake_case , use_fast=__snake_case )
if tok_name == MBART_TINY:
__a =SeqaSeqDataset(
__snake_case , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
__a =train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__a =SeqaSeqDataset(
__snake_case , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
__a =train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__snake_case ) == 1 if tok_name == BART_TINY else len(__snake_case ) == 0
| 308
|
def UpperCamelCase_( _snake_case : str , _snake_case : int ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(_snake_case ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 308
| 1
|
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
__a =len(_snake_case )
__a =sum(_snake_case )
__a =[[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__a =True
for i in range(1 , s + 1 ):
__a =False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__a =dp[i][j - 1]
if arr[i - 1] <= j:
__a =dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__a =s - 2 * j
break
return diff
| 308
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class __magic_name__ ( pl.LightningModule ):
def __init__( self , __snake_case ) -> List[Any]:
'''simple docstring'''
super().__init__()
__a =model
__a =2
__a =nn.Linear(self.model.config.hidden_size , self.num_labels )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCamelCase_( _snake_case : str , _snake_case : str , _snake_case : str ):
"""simple docstring"""
__a =LongformerModel.from_pretrained(_snake_case )
__a =LightningModel(_snake_case )
__a =torch.load(_snake_case , map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
__a =LongformerForQuestionAnswering.from_pretrained(_snake_case )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_snake_case )
print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 308
| 1
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_lowerCAmelCase : List[str] = get_tests_dir("fixtures")
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
# A mock response for an HTTP head request to emulate server down
__a =mock.Mock()
__a =500
__a ={}
__a =HTTPError
__a ={}
# Download this model to make sure it's in the cache.
__a =WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__snake_case ) as mock_head:
__a =WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
__a =WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class __magic_name__ ( unittest.TestCase ):
@classmethod
def __magic_name__ ( cls ) -> Union[str, Any]:
'''simple docstring'''
__a =TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def __magic_name__ ( cls ) -> Dict:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =WavaVecaFeatureExtractor.from_pretrained(__snake_case )
feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )
__a =WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__snake_case , repo_id='test-feature-extractor' , push_to_hub=__snake_case , use_auth_token=self._token )
__a =WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =WavaVecaFeatureExtractor.from_pretrained(__snake_case )
feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )
__a =WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__snake_case , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=__snake_case , use_auth_token=self._token )
__a =WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
__a =CustomFeatureExtractor.from_pretrained(__snake_case )
feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
__a =AutoFeatureExtractor.from_pretrained(
f'{USER}/test-dynamic-feature-extractor' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
| 308
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
SCREAMING_SNAKE_CASE = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
SCREAMING_SNAKE_CASE = field(
default=1_0_2_4 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = field(default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the test data.'} )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
__a =self.train_file.split('.' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__a =self.validation_file.split('.' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
SCREAMING_SNAKE_CASE = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
SCREAMING_SNAKE_CASE = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def UpperCamelCase_( ):
"""simple docstring"""
__a =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__a , __a , __a =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__a , __a , __a =parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
__a =training_args.get_process_log_level()
logger.setLevel(_snake_case )
datasets.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__a =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__a =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__a =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__a ={'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__a =data_args.train_file.split('.' )[-1]
__a =data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__a =data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
__a =load_dataset('csv' , data_files=_snake_case , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__a =load_dataset('json' , data_files=_snake_case , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__a =raw_datasets['train'].features['label'].names
__a =len(_snake_case )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__a =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__a =TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_snake_case , )
__a =BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__a ='max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__a =False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__a ={'Refused': 0, 'Entailed': 1}
__a ={0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
__a =min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_snake_case : Tuple ):
# Tokenize the texts
def _convert_table_text_to_pandas(_snake_case : Optional[Any] ):
__a =[_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
__a =pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__a =examples['statement']
__a =list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
__a =tokenizer(_snake_case , _snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case )
__a =examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
__a =raw_datasets.map(
_snake_case , batched=_snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__a =raw_datasets['train']
if data_args.max_train_samples is not None:
__a =train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__a =raw_datasets['validation']
if data_args.max_eval_samples is not None:
__a =eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
__a =raw_datasets['test']
if data_args.max_predict_samples is not None:
__a =predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_snake_case ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_snake_case : EvalPrediction ):
__a =p.predictions[0] if isinstance(p.predictions , _snake_case ) else p.predictions
__a =np.argmax(_snake_case , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__a =default_data_collator
elif training_args.fpaa:
__a =DataCollatorWithPadding(_snake_case , pad_to_multiple_of=8 )
else:
__a =None
# Initialize our Trainer
__a =Trainer(
model=_snake_case , args=_snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_snake_case , tokenizer=_snake_case , data_collator=_snake_case , )
# Training
if training_args.do_train:
__a =None
if training_args.resume_from_checkpoint is not None:
__a =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a =last_checkpoint
__a =trainer.train(resume_from_checkpoint=_snake_case )
__a =train_result.metrics
__a =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(_snake_case )
)
__a =min(_snake_case , len(_snake_case ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , _snake_case )
trainer.save_metrics('train' , _snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__a =trainer.evaluate(eval_dataset=_snake_case )
__a =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_snake_case )
__a =min(_snake_case , len(_snake_case ) )
trainer.log_metrics('eval' , _snake_case )
trainer.save_metrics('eval' , _snake_case )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
__a =predict_dataset.remove_columns('label' )
__a =trainer.predict(_snake_case , metric_key_prefix='predict' ).predictions
__a =np.argmax(_snake_case , axis=1 )
__a =os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(_snake_case , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(_snake_case ):
__a =label_list[item]
writer.write(F'{index}\t{item}\n' )
__a ={'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**_snake_case )
else:
trainer.create_model_card(**_snake_case )
def UpperCamelCase_( _snake_case : Union[str, Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 308
| 1
|
def UpperCamelCase_( _snake_case : list , _snake_case : list , _snake_case : int , _snake_case : int , _snake_case : int ):
"""simple docstring"""
if index == number_of_items:
return 0
__a =0
__a =0
__a =knapsack(_snake_case , _snake_case , _snake_case , _snake_case , index + 1 )
if weights[index] <= max_weight:
__a =values[index] + knapsack(
_snake_case , _snake_case , _snake_case , max_weight - weights[index] , index + 1 )
return max(_snake_case , _snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
|
from __future__ import annotations
import time
import numpy as np
_lowerCAmelCase : List[str] = [8, 5, 9, 7]
_lowerCAmelCase : List[str] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_lowerCAmelCase : List[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __magic_name__ :
def __init__( self , __snake_case , __snake_case , __snake_case , ) -> None:
'''simple docstring'''
__a =claim_vector
__a =allocated_resources_table
__a =maximum_claim_table
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __magic_name__ ( self ) -> list[int]:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __magic_name__ ( self ) -> list[list[int]]:
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__snake_case ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __magic_name__ ( self ) -> dict[int, list[int]]:
'''simple docstring'''
return {self.__need().index(__snake_case ): i for i in self.__need()}
def __magic_name__ ( self , **__snake_case ) -> None:
'''simple docstring'''
__a =self.__need()
__a =self.__allocated_resources_table
__a =self.__available_resources()
__a =self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
__a =False
for each_need in need_list:
__a =True
for index, need in enumerate(__snake_case ):
if need > available_resources[index]:
__a =False
break
if execution:
__a =True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__a =original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(__snake_case )
# update available/freed resources stack
__a =np.array(__snake_case ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(__snake_case ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(__snake_case ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(__snake_case ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(__snake_case ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(__snake_case ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Optional[int] = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[Any] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_lowerCAmelCase : Tuple = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
_lowerCAmelCase : Optional[int] = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
_lowerCAmelCase : Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a ={letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def UpperCamelCase_( _snake_case : tuple ):
"""simple docstring"""
return x[0]
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_letter_count(_snake_case )
__a ={
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(_snake_case )
__a ={}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_snake_case )
__a =''.join(freq_to_letter[freq] )
__a =list(freq_to_letter_str.items() )
freq_pairs.sort(key=_snake_case , reverse=_snake_case )
__a =[freq_pair[1] for freq_pair in freq_pairs]
return "".join(_snake_case )
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_frequency_order(_snake_case )
__a =0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
| 1
|
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_lowerCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , 'steps_offset' ) and scheduler.config.steps_offset != 1:
__a =(
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
'to update the config accordingly as leaving `steps_offset` might led to incorrect results'
' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
' file'
)
deprecate('steps_offset!=1' , '1.0.0' , __snake_case , standard_warn=__snake_case )
__a =dict(scheduler.config )
__a =1
__a =FrozenDict(__snake_case )
if hasattr(scheduler.config , 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
__a =(
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
' Hub, it would be very nice if you could open a Pull request for the'
' `scheduler/scheduler_config.json` file'
)
deprecate('skip_prk_steps not set' , '1.0.0' , __snake_case , standard_warn=__snake_case )
__a =dict(scheduler.config )
__a =True
__a =FrozenDict(__snake_case )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
segmentation_model=__snake_case , segmentation_processor=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , unet=__snake_case , scheduler=__snake_case , safety_checker=__snake_case , feature_extractor=__snake_case , )
def __magic_name__ ( self , __snake_case = "auto" ) -> Dict:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
self.enable_attention_slicing(__snake_case )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__a =torch.device('cuda' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(__snake_case , __snake_case )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __magic_name__ ( self ) -> int:
'''simple docstring'''
if self.device != torch.device('meta' ) or not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__snake_case , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case = 512 , __snake_case = 512 , __snake_case = 50 , __snake_case = 7.5 , __snake_case = None , __snake_case = 1 , __snake_case = 0.0 , __snake_case = None , __snake_case = None , __snake_case = "pil" , __snake_case = True , __snake_case = None , __snake_case = 1 , **__snake_case , ) -> Dict:
'''simple docstring'''
__a =self.segmentation_processor(
text=[text] , images=[image] , padding='max_length' , return_tensors='pt' ).to(self.device )
__a =self.segmentation_model(**__snake_case )
__a =torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__a =self.numpy_to_pil(__snake_case )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__a =StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=__snake_case , image=__snake_case , mask_image=__snake_case , height=__snake_case , width=__snake_case , num_inference_steps=__snake_case , guidance_scale=__snake_case , negative_prompt=__snake_case , num_images_per_prompt=__snake_case , eta=__snake_case , generator=__snake_case , latents=__snake_case , output_type=__snake_case , return_dict=__snake_case , callback=__snake_case , callback_steps=__snake_case , )
| 308
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : int = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'swin2sr'
SCREAMING_SNAKE_CASE = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __snake_case=64 , __snake_case=1 , __snake_case=3 , __snake_case=180 , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=8 , __snake_case=2.0 , __snake_case=True , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case="gelu" , __snake_case=False , __snake_case=0.02 , __snake_case=1e-5 , __snake_case=2 , __snake_case=1.0 , __snake_case="1conv" , __snake_case="pixelshuffle" , **__snake_case , ) -> Dict:
'''simple docstring'''
super().__init__(**__snake_case )
__a =image_size
__a =patch_size
__a =num_channels
__a =embed_dim
__a =depths
__a =len(__snake_case )
__a =num_heads
__a =window_size
__a =mlp_ratio
__a =qkv_bias
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =drop_path_rate
__a =hidden_act
__a =use_absolute_embeddings
__a =layer_norm_eps
__a =initializer_range
__a =upscale
__a =img_range
__a =resi_connection
__a =upsampler
| 308
| 1
|
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
_lowerCAmelCase : int = logging.get_logger(__name__)
class __magic_name__ :
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = None
@staticmethod
def __magic_name__ ( ) -> Dict:
'''simple docstring'''
raise NotImplementedError
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ) -> str:
'''simple docstring'''
raise NotImplementedError
def __magic_name__ ( self , __snake_case ) -> List[Any]:
'''simple docstring'''
raise NotImplementedError
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' )
@classmethod
def __magic_name__ ( cls ) -> Optional[int]:
'''simple docstring'''
return f'`pip install {cls.pip_package or cls.name}`'
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'optuna'
@staticmethod
def __magic_name__ ( ) -> Optional[Any]:
'''simple docstring'''
return is_optuna_available()
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ) -> Union[str, Any]:
'''simple docstring'''
return run_hp_search_optuna(__snake_case , __snake_case , __snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case ) -> Union[str, Any]:
'''simple docstring'''
return default_hp_space_optuna(__snake_case )
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'ray'
SCREAMING_SNAKE_CASE = '\'ray[tune]\''
@staticmethod
def __magic_name__ ( ) -> str:
'''simple docstring'''
return is_ray_available()
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ) -> Dict:
'''simple docstring'''
return run_hp_search_ray(__snake_case , __snake_case , __snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case ) -> Tuple:
'''simple docstring'''
return default_hp_space_ray(__snake_case )
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'sigopt'
@staticmethod
def __magic_name__ ( ) -> Optional[int]:
'''simple docstring'''
return is_sigopt_available()
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ) -> int:
'''simple docstring'''
return run_hp_search_sigopt(__snake_case , __snake_case , __snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case ) -> int:
'''simple docstring'''
return default_hp_space_sigopt(__snake_case )
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'wandb'
@staticmethod
def __magic_name__ ( ) -> Tuple:
'''simple docstring'''
return is_wandb_available()
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ) -> int:
'''simple docstring'''
return run_hp_search_wandb(__snake_case , __snake_case , __snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case ) -> Dict:
'''simple docstring'''
return default_hp_space_wandb(__snake_case )
_lowerCAmelCase : Any = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def UpperCamelCase_( ):
"""simple docstring"""
__a =[backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_snake_case ) > 0:
__a =available_backends[0].name
if len(_snake_case ) > 1:
logger.info(
F'{len(_snake_case )} hyperparameter search backends available. Using {name} as the default.' )
return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
F' - To install {backend.name} run {backend.pip_install()}'
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 308
|
import os
def UpperCamelCase_( _snake_case : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_snake_case ) , _snake_case ) ) as input_file:
__a =[
[int(_snake_case ) for element in line.split(',' )]
for line in input_file.readlines()
]
__a =len(_snake_case )
__a =len(matrix[0] )
__a =[[-1 for _ in range(_snake_case )] for _ in range(_snake_case )]
for i in range(_snake_case ):
__a =matrix[i][0]
for j in range(1 , _snake_case ):
for i in range(_snake_case ):
__a =minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _snake_case ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
| 1
|
import requests
def UpperCamelCase_( _snake_case : str , _snake_case : str ):
"""simple docstring"""
__a ={'Content-Type': 'application/json'}
__a =requests.post(_snake_case , json={'text': message_body} , headers=_snake_case )
if response.status_code != 200:
__a =(
'Request to slack returned an error '
F'{response.status_code}, the response is:\n{response.text}'
)
raise ValueError(_snake_case )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 308
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_lowerCAmelCase : Any = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , *__snake_case , **__snake_case ) -> None:
'''simple docstring'''
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 308
| 1
|
import fire
from utils import calculate_rouge, save_json
def UpperCamelCase_( _snake_case : Dict , _snake_case : List[Any] , _snake_case : Optional[Any]=None , **_snake_case : Optional[int] ):
"""simple docstring"""
__a =[x.strip() for x in open(_snake_case ).readlines()]
__a =[x.strip() for x in open(_snake_case ).readlines()][: len(_snake_case )]
__a =calculate_rouge(_snake_case , _snake_case , **_snake_case )
if save_path is not None:
save_json(_snake_case , _snake_case , indent=_snake_case )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 308
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.