| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""Tokenize a raw text corpus once and pickle the token ids, so the (tokenization + token_to_ids) step is not redone at every training run."""
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    count = 0  # renamed from `iter` to avoid shadowing the built-in
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        count += 1
        if count % interval == 0:
            end = time.time()
            logger.info(f"{count} examples processed. - {(end - start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # uint16 is enough as long as every token id fits in two bytes
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
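# Example invocation (paths are hypothetical):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text
# writes `data/binarized_text.bert-base-uncased.pickle`.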
| 24 |
"""Utilities to map model test files to their test classes, model classes and model testers."""
import importlib
import os
import sys


# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Return the module path (e.g. `tests.models.bert.test_modeling_bert`) of a test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the module defined in `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Return all tester classes (names ending in `ModelTester`) found in `test_file`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Return all test classes in `test_file` with a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test modules. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Return all model classes covered by the test classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model tester class used by `test_class`, or `None`."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in `test_file` that cover `model_class`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return the tester classes used by the test classes covering `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class to the test classes covering it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class to the tester classes covering it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Represent classes by their names so the mappings can be dumped as JSON."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
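# Minimal usage sketch (the test file path is just an example):
#   test_file = "tests/models/bert/test_modeling_bert.py"
#   print(to_json(get_model_to_test_mapping(test_file)))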
| 262 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = '▁'
snake_case = {'vocab_file': 'sentencepiece.bpe.model'}
snake_case = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
snake_case = {
'facebook/xglm-564M': 2_0_4_8,
}
class UpperCamelCase ( __magic_name__ ):
"""simple docstring"""
UpperCAmelCase_ : Any = VOCAB_FILES_NAMES
UpperCAmelCase_ : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , lowercase__ , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__ = None , **lowercase__ , ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
SCREAMING_SNAKE_CASE = 7
SCREAMING_SNAKE_CASE = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
SCREAMING_SNAKE_CASE = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , pad_token=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , **lowercase__ , )
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase__ ) )
SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE = 1
# Mimic fairseq token-to-id alignment for the first 4 token
SCREAMING_SNAKE_CASE = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
SCREAMING_SNAKE_CASE = len(self.sp_model )
SCREAMING_SNAKE_CASE = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(lowercase__ )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowercase__ ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def A ( self , lowercase__ , lowercase__ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
SCREAMING_SNAKE_CASE = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def A ( self , lowercase__ , lowercase__ = None , lowercase__ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase__ ))
return [1] + ([0] * len(lowercase__ )) + [1, 1] + ([0] * len(lowercase__ ))
def A ( self , lowercase__ , lowercase__ = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def A ( self ) -> List[str]:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def A ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A ( self , lowercase__ ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowercase__ , out_type=lowercase__ )
def A ( self , lowercase__ ) -> Any:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(lowercase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A ( self , lowercase__ ) -> Dict:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A ( self , lowercase__ ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = ''.join(lowercase__ ).replace(lowercase__ , ' ' ).strip()
return out_string
def A ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE = os.path.join(
lowercase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ , 'wb' ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
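# Minimal usage sketch (downloads the sentencepiece model from the hub):
#   tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   tokenizer("Hello world")["input_ids"]  # ids prefixed with </s>, per build_inputs_with_special_tokens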
| 406 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
SCREAMING_SNAKE_CASE = 1_9_2
SCREAMING_SNAKE_CASE = 7_6_8
SCREAMING_SNAKE_CASE = 1_2
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = [8_0_0, 1_3_3_3]
SCREAMING_SNAKE_CASE = False
elif yolos_name == "yolos_s_dWr":
SCREAMING_SNAKE_CASE = 3_3_0
SCREAMING_SNAKE_CASE = 1_4
SCREAMING_SNAKE_CASE = 6
SCREAMING_SNAKE_CASE = 1_3_2_0
elif "yolos_s" in yolos_name:
SCREAMING_SNAKE_CASE = 3_8_4
SCREAMING_SNAKE_CASE = 1_5_3_6
SCREAMING_SNAKE_CASE = 1_2
SCREAMING_SNAKE_CASE = 6
elif "yolos_b" in yolos_name:
SCREAMING_SNAKE_CASE = [8_0_0, 1_3_4_4]
SCREAMING_SNAKE_CASE = 9_1
SCREAMING_SNAKE_CASE = 'huggingface/label-files'
SCREAMING_SNAKE_CASE = 'coco-detection-id2label.json'
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, repo_type='dataset' ), 'r' ) )
SCREAMING_SNAKE_CASE = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[: config.hidden_size, :]
SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE = in_proj_weight[-config.hidden_size :, :]
SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :]
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
if "backbone" in name:
SCREAMING_SNAKE_CASE = name.replace('backbone', 'vit' )
if "cls_token" in name:
SCREAMING_SNAKE_CASE = name.replace('cls_token', 'embeddings.cls_token' )
if "det_token" in name:
SCREAMING_SNAKE_CASE = name.replace('det_token', 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
SCREAMING_SNAKE_CASE = name.replace('mid_pos_embed', 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
SCREAMING_SNAKE_CASE = name.replace('pos_embed', 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
SCREAMING_SNAKE_CASE = name.replace('blocks', 'encoder.layer' )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE = name.replace('attn.proj', 'attention.output.dense' )
if "attn" in name:
SCREAMING_SNAKE_CASE = name.replace('attn', 'attention.self' )
if "norm1" in name:
SCREAMING_SNAKE_CASE = name.replace('norm1', 'layernorm_before' )
if "norm2" in name:
SCREAMING_SNAKE_CASE = name.replace('norm2', 'layernorm_after' )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE = name.replace('mlp.fc1', 'intermediate.dense' )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE = name.replace('mlp.fc2', 'output.dense' )
if "class_embed" in name:
SCREAMING_SNAKE_CASE = name.replace('class_embed', 'class_labels_classifier' )
if "bbox_embed" in name:
SCREAMING_SNAKE_CASE = name.replace('bbox_embed', 'bbox_predictor' )
if "vit.norm" in name:
SCREAMING_SNAKE_CASE = name.replace('vit.norm', 'vit.layernorm' )
return name
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "qkv" in key:
SCREAMING_SNAKE_CASE = key.split('.' )
SCREAMING_SNAKE_CASE = int(key_split[2] )
SCREAMING_SNAKE_CASE = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
SCREAMING_SNAKE_CASE = val[:dim, :]
SCREAMING_SNAKE_CASE = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE = val[:dim]
SCREAMING_SNAKE_CASE = val[dim : dim * 2]
SCREAMING_SNAKE_CASE = val[-dim:]
else:
SCREAMING_SNAKE_CASE = val
return orig_state_dict
def UpperCamelCase_ ( ):
SCREAMING_SNAKE_CASE = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE = Image.open(requests.get(SCREAMING_SNAKE_CASE_, stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = False ):
SCREAMING_SNAKE_CASE = get_yolos_config(SCREAMING_SNAKE_CASE_ )
# load original state_dict
SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_, map_location='cpu' )['model']
# load 🤗 model
SCREAMING_SNAKE_CASE = YolosForObjectDetection(SCREAMING_SNAKE_CASE_ )
model.eval()
SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by YolosImageProcessor
SCREAMING_SNAKE_CASE = 8_0_0 if yolos_name != 'yolos_ti' else 5_1_2
SCREAMING_SNAKE_CASE = YolosImageProcessor(format='coco_detection', size=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = image_processor(images=prepare_img(), return_tensors='pt' )
SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs.logits, outputs.pred_boxes
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None, None
if yolos_name == "yolos_ti":
SCREAMING_SNAKE_CASE = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
SCREAMING_SNAKE_CASE = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
SCREAMING_SNAKE_CASE = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
SCREAMING_SNAKE_CASE = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
SCREAMING_SNAKE_CASE = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
SCREAMING_SNAKE_CASE = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
SCREAMING_SNAKE_CASE = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
SCREAMING_SNAKE_CASE = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
SCREAMING_SNAKE_CASE = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
SCREAMING_SNAKE_CASE = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3], SCREAMING_SNAKE_CASE_, atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3], SCREAMING_SNAKE_CASE_, atol=1E-4 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
SCREAMING_SNAKE_CASE = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
SCREAMING_SNAKE_CASE = model_mapping[yolos_name]
image_processor.push_to_hub(SCREAMING_SNAKE_CASE_, organization='hustvl' )
model.push_to_hub(SCREAMING_SNAKE_CASE_, organization='hustvl' )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
snake_case = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
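# Example invocation (the checkpoint path is hypothetical):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small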
| 406 | 1 |
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : List[Any] = args.pruning_method
snake_case_ : Any = args.threshold
snake_case_ : Optional[Any] = args.model_name_or_path.rstrip("""/""" )
snake_case_ : Optional[Any] = args.target_model_path
print(f'Load fine-pruned model from {model_name_or_path}' )
snake_case_ : str = torch.load(os.path.join(SCREAMING_SNAKE_CASE__ , """pytorch_model.bin""" ) )
snake_case_ : Optional[int] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
snake_case_ : Dict = tensor
print(f'Copied layer {name}' )
elif "classifier" in name or "qa_output" in name:
snake_case_ : List[Any] = tensor
print(f'Copied layer {name}' )
elif "bias" in name:
snake_case_ : Tuple = tensor
print(f'Copied layer {name}' )
else:
if pruning_method == "magnitude":
snake_case_ : List[Any] = MagnitudeBinarizer.apply(inputs=SCREAMING_SNAKE_CASE__ , threshold=SCREAMING_SNAKE_CASE__ )
snake_case_ : int = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
snake_case_ : List[str] = name[:-6]
snake_case_ : int = model[f'{prefix_}mask_scores']
snake_case_ : Optional[Any] = TopKBinarizer.apply(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[int] = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
snake_case_ : str = name[:-6]
snake_case_ : str = model[f'{prefix_}mask_scores']
snake_case_ : List[str] = ThresholdBinarizer.apply(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
snake_case_ : List[Any] = name[:-6]
snake_case_ : Optional[int] = model[f'{prefix_}mask_scores']
snake_case_ , snake_case_ : List[str] = -0.1, 1.1
snake_case_ : Optional[int] = torch.sigmoid(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[int] = s * (r - l) + l
snake_case_ : Tuple = s_bar.clamp(min=0.0 , max=1.0 )
snake_case_ : List[str] = tensor * mask
print(f'Pruned layer {name}' )
else:
raise ValueError("""Unknown pruning method""" )
if target_model_path is None:
snake_case_ : int = os.path.join(
os.path.dirname(SCREAMING_SNAKE_CASE__ ) , f'bertarized_{os.path.basename(SCREAMING_SNAKE_CASE__ )}' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
shutil.copytree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(f'\nCreated folder {target_model_path}' )
torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , """pytorch_model.bin""" ) )
print("""\nPruned model saved! See you later!""" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
a_ = parser.parse_args()
main(args)
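# Example invocation (the model folder is hypothetical):
#   python bertarize.py --pruning_method sigmoied_threshold --threshold 0.1 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model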
| 480 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and number_of_steps > 0
), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
snake_case_ , snake_case_ : Union[str, Any] = 1, 1
for _ in range(number_of_steps - 1 ):
snake_case_ , snake_case_ : int = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
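# The loop is the constant-space Fibonacci recurrence ways(n) = ways(n - 1) + ways(n - 2):
# the final move onto step n is either a single step from n - 1 or a double step from n - 2.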
| 480 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
class __snake_case( __A ):
_A = '''encoder-decoder'''
_A = True
def __init__( self , **A_ ):
'''simple docstring'''
super().__init__(**A_ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
_SCREAMING_SNAKE_CASE = kwargs.pop('''encoder''' )
_SCREAMING_SNAKE_CASE = encoder_config.pop('''model_type''' )
_SCREAMING_SNAKE_CASE = kwargs.pop('''decoder''' )
_SCREAMING_SNAKE_CASE = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
_SCREAMING_SNAKE_CASE = AutoConfig.for_model(A_ , **A_ )
_SCREAMING_SNAKE_CASE = AutoConfig.for_model(A_ , **A_ )
_SCREAMING_SNAKE_CASE = True
@classmethod
def A ( cls , A_ , A_ , **A_ ):
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **A_ )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE = self.encoder.to_dict()
_SCREAMING_SNAKE_CASE = self.decoder.to_dict()
_SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
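# Typical construction (the model names are just examples):
#   from transformers import AutoConfig
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(
#       AutoConfig.from_pretrained("bert-base-uncased"), AutoConfig.from_pretrained("gpt2")
#   )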
| 168 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
lowerCamelCase : Any = """facebook/wmt19-en-de"""
lowerCamelCase : int = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
lowerCamelCase : Dict = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
lowerCamelCase : Dict = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
lowerCamelCase : Dict = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
lowerCamelCase : Tuple = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
lowerCamelCase : Optional[int] = """tiny-wmt19-en-de"""
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
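# Once uploaded, the tiny checkpoint loads like any other hub model:
#   FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")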
| 168 | 1 |
from math import isqrt


def is_prime(number: int) -> bool:
    """Trial division up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """
    Project Euler problem 131: count the primes p below `max_prime` for which
    n**3 + n**2 * p is a perfect cube for some positive integer n. Those primes
    are exactly the differences of consecutive cubes, (k + 1)**3 - k**3.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # (1 + 1)**3 - 1**3
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
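# Why the increment works: with d(k) = (k + 1)**3 - k**3 = 3*k**2 + 3*k + 1,
# d(k + 1) - d(k) = 6*(k + 1), so each loop iteration adds 6 * cube_index.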
| 507 |
from collections.abc import Callable

import numpy as np


def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """
    Solve an ODE y' = f(x, y) with the modified Euler (Heun) method:
    an explicit Euler predictor followed by a trapezoidal corrector.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor: one explicit Euler step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # corrector: trapezoidal average of the slopes at both ends of the step
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        x += step_size

    return y
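# Example (hypothetical ODE): y' = y with y(0) = 1 on [0, 1]; the endpoint approximates e.
#   euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]  # ~ 2.718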
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 0 |
"""Convert a Reformer checkpoint trained with Trax into the 🤗 Transformers PyTorch format."""
import argparse
import pickle

import numpy as np
import torch
from torch import nn

from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging


logging.set_verbosity_info()


def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # LSH self-attention uses a shared query/key projection
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # local self-attention has separate query and key projections
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the official Trax model pickle."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
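# Example invocation (paths are hypothetical):
#   python convert_reformer_trax_checkpoint_to_pytorch.py --trax_model_pkl_path model.pkl \
#       --config_file config.json --pytorch_dump_path pytorch_model.bin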
| 322 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
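# Thanks to _LazyModule, `from transformers.models.gpt_bigcode import GPTBigCodeModel` resolves the
# torch-dependent submodule only on first attribute access, keeping the top-level import cheap.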
| 322 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 618 |
"""Convert the original consistency-model UNet checkpoints into the diffusers format."""
import argparse
import os

import torch

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)


TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}


def str2bool(v):
    """Parse a boolean CLI value."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")


def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint


def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
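# Example invocation (the checkpoint filename is hypothetical; it must contain one of the
# patterns matched above, e.g. "cd" plus "imagenet64"):
#   python convert_consistency_to_diffusers.py --unet_path cd_imagenet64_l2.pt --dump_path ./cm-imagenet64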
| 618 | 1 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity * n-factor, with volume in litres."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """P = nRT / V with R = 0.0821 L*atm/(mol*K): volume in litres, temperature in kelvin, pressure in atm."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """V = nRT / P: pressure in atm, temperature in kelvin, volume in litres."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """T = PV / (nR): pressure in atm, volume in litres, temperature in kelvin."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
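# Spot checks (all helpers round to the nearest integer):
#   moles_to_pressure(0.82, 3, 300)                 -> 90
#   moles_to_volume(0.82, 3, 300)                   -> 90
#   pressure_and_volume_to_temperature(0.82, 1, 2)  -> 20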
| 333 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = AutoencoderKL
__SCREAMING_SNAKE_CASE : Optional[int] = 'sample'
__SCREAMING_SNAKE_CASE : Any = 1E-2
@property
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : List[Any] = 4
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : int = (32, 32)
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCamelCase )
return {"sample": image}
@property
def __lowerCAmelCase ( self ) ->str:
return (3, 32, 32)
@property
def __lowerCAmelCase ( self ) ->Dict:
return (3, 32, 32)
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def __lowerCAmelCase ( self ) ->Dict:
pass
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def __lowerCAmelCase ( self ) ->Dict:
# enable deterministic behavior for gradient checkpointing
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_class(**_lowerCamelCase )
model.to(_lowerCamelCase )
assert not model.is_gradient_checkpointing and model.training
SCREAMING_SNAKE_CASE : Tuple = model(**_lowerCamelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn_like(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
SCREAMING_SNAKE_CASE : str = self.model_class(**_lowerCamelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_lowerCamelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
SCREAMING_SNAKE_CASE : Any = model_a(**_lowerCamelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
SCREAMING_SNAKE_CASE : Tuple = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
SCREAMING_SNAKE_CASE : List[Any] = dict(model.named_parameters() )
SCREAMING_SNAKE_CASE : Optional[int] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image
    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model
    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
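
# ---------------------------------------------------------------------------
# Usage sketch (not part of the test suite): the encode/decode round trip the
# tests above exercise, written out standalone. The checkpoint name is the same
# dummy VAE used in `test_from_pretrained_hub`; the input shape is illustrative.
#
#     import torch
#     from diffusers import AutoencoderKL
#
#     vae = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy").eval()
#     x = torch.randn(1, 3, 32, 32)
#     with torch.no_grad():
#         latents = vae.encode(x).latent_dist.sample()
#         recon = vae.decode(latents).sample
#     print(latents.shape, recon.shape)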
| 333 | 1 |
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
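
# Usage sketch: with this builder registered under "audiofolder", a directory of
# audio files laid out as data_dir/<label>/<clip>.wav can be loaded directly.
# The path below is a placeholder.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("audiofolder", data_dir="/path/to/clips")
#     print(ds["train"][0]["audio"], ds["train"][0]["label"])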
| 418 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
END_COMMON = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
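
# Example invocation (paths are placeholders; script filename assumed). Note the
# weight-layout rule applied above: TF stores dense/query/key/value kernels
# transposed relative to PyTorch, hence `v = v.T` before `torch.from_numpy`.
#
#     python convert_bigbird_pegasus_tf_to_pytorch.py \
#         --tf_ckpt_path ./bigbird_pegasus_ckpt \
#         --save_dir ./bigbird-pegasus-pt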
| 392 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
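
# Note on the formula: for each example the loop above computes
#
#     PPL_i = 2 ** ( sum_t mask_t * CE(logits_t, label_t) / sum_t mask_t )
#
# i.e. the per-example average cross-entropy over non-padding positions,
# exponentiated (base 2 here, since `torch.exp2` is used), with logits shifted
# one step left so that each position predicts the *next* token.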
| 44 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
_TASK_TEMPLATES_BY_TASK = {  # task categories mapped to their (currently empty) template lists
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
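
# Example of the YAML block this class round-trips at the top of a README.md
# (field names are illustrative):
#
#     ---
#     language:
#     - en
#     license: mit
#     train-eval-index:
#     - config: default
#     ---
#
# `from_readme` parses the block between the `---` markers; `to_readme` writes
# it back, converting `train_eval_index` to its dashed YAML spelling.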
| 44 | 1 |
UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)
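
# Worked example: b"Man" -> bits 010011 010110 000101 101110 -> "TWFu".
#
#     >>> base64_encode(b"Man")
#     b'TWFu'
#     >>> base64_decode("TWFu")
#     b'Man'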
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 45 |
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with the item count of each set; every set starts with rank 1."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Union by rank; returns False if src and dst are already in the same set."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
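
# Usage sketch: three singleton sets of size 1 each; merging sets 0 and 1
# produces a set of size 2, which becomes the new `max_set`.
#
#     ds = DisjointSet([1, 1, 1])
#     assert ds.merge(0, 1) is True
#     assert ds.get_parent(0) == ds.get_parent(1)
#     assert ds.max_set == 2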
| 81 | 0 |
'''simple docstring'''
def ugly_numbers(n: int) -> int:
    """Returns the nth ugly number (a number whose only prime factors are 2, 3, 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(200) = }")
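
# The sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... (the 2-3-5-smooth
# numbers), so for example ugly_numbers(10) == 12.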
| 719 |
'''simple docstring'''
deps = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
| 506 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention  # historically misspelled kwarg key
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
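
# Usage sketch (values shown are the defaults above):
#
#     config = OpenLlamaConfig()
#     assert config.vocab_size == 100000 and config.hidden_act == "silu"
#
#     # rope_scaling must be a 2-field dict with a known type and a float factor > 1:
#     config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})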
| 425 |
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    """Checks whether 'number' is prime; input must be a positive int."""
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"
    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: all primes from 2 up to n."""
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans


def get_prime_numbers(n: int) -> list:
    """Primes from 2 up to n, via trial division."""
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans


def prime_factorization(number: int) -> list:
    """Prime factorization of 'number' as a list of prime factors."""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans


def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"

    return number % 2 != 0
def goldbach(number: int) -> list:
    """Goldbach: for an even number > 2, returns two primes whose sum equals it."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"

    return ans


def gcd(number1: int, number2: int) -> int:
    """Greatest common divisor via the Euclidean algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number' must be an int and positive"
    return number1


def kg_v(number1: int, number2: int) -> int:
    """Least common multiple ("kgV"), computed from the prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."

    ans = 1  # actual answer that will be returned.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'

    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must be an int and positive"

    return ans
def get_prime(n: int) -> int:
    """Returns the n-th prime number (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'n' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must be a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """All primes strictly between two primes p_number_1 < p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains neither 'p_number_1' nor 'p_number_2' !
    return ans


def get_divisors(n: int) -> list:
    """All divisors of n (including 1 and n itself)."""
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    """True if 'number' equals the sum of its proper divisors."""
    assert isinstance(number, int) and (number > 1), "'number' must be an int and > 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in helper function get_divisors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduces numerator/denominator by their greatest common divisor."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be of type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be returned.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Returns the n-th Fibonacci number (fib(0) == fib(1) == 1 in this indexing)."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
| 425 | 1 |
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
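
# Usage sketch: with these fixtures in a conftest.py, a test can load the dummy
# script by path (test body is illustrative):
#
#     def test_dummy_dataset(dataset_loading_script_dir):
#         from datasets import load_dataset
#         ds = load_dataset(dataset_loading_script_dir, split="train")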
| 367 |
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
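
# Worked example: entering a+b*(c^d-e) prints the conversion table and yields
#     postfix: abcd^e-*+        (via infix_2_postfix)
#     prefix:  +a*b-^cde        (via infix_2_prefix)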
| 367 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 333 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowerCAmelCase =imread(R"digital_image_processing/image_data/lena_small.jpg")
__lowerCAmelCase =cvtColor(img, COLOR_BGR2GRAY)
def __UpperCamelCase ( ):
"""simple docstring"""
UpperCAmelCase = cn.convert_to_negative(_lowerCAmelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def __UpperCamelCase ( ):
"""simple docstring"""
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_lowerCAmelCase , 1_10 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def __UpperCamelCase ( ):
"""simple docstring"""
UpperCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def __UpperCamelCase ( ):
"""simple docstring"""
UpperCAmelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
UpperCAmelCase = canny.canny(_lowerCAmelCase )
# assert canny array for at least one True
assert canny_array.any()
def __UpperCamelCase ( ):
"""simple docstring"""
assert gg.gaussian_filter(_lowerCAmelCase , 5 , sigma=0.9 ).all()
def __UpperCamelCase ( ):
"""simple docstring"""
UpperCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
UpperCAmelCase = conv.img_convolve(_lowerCAmelCase , _lowerCAmelCase ).astype(_lowerCAmelCase )
assert res.any()
def __UpperCamelCase ( ):
"""simple docstring"""
assert med.median_filter(_lowerCAmelCase , 3 ).any()
def __UpperCamelCase ( ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = sob.sobel_filter(_lowerCAmelCase )
assert grad.any() and theta.any()
def __UpperCamelCase ( ):
"""simple docstring"""
UpperCAmelCase = sp.make_sepia(_lowerCAmelCase , 20 )
assert sepia.all()
def __UpperCamelCase ( _lowerCAmelCase = "digital_image_processing/image_data/lena_small.jpg" ):
"""simple docstring"""
UpperCAmelCase = bs.Burkes(imread(_lowerCAmelCase , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def __UpperCamelCase ( _lowerCAmelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
"""simple docstring"""
UpperCAmelCase = rs.NearestNeighbour(imread(_lowerCAmelCase , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def __UpperCamelCase ( ):
"""simple docstring"""
UpperCAmelCase = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
UpperCAmelCase = imread(_lowerCAmelCase , 0 )
# Test for get_neighbors_pixel function() return not None
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = image[x_coordinate][y_coordinate]
UpperCAmelCase = lbp.get_neighbors_pixel(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
UpperCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
UpperCAmelCase = lbp.local_binary_value(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
assert lbp_image.any()
| 333 | 1 |
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase ( snake_case__ : Dict , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Optional[int]="attention" )-> int:
A_ = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
A_ = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
A_ = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
A_ = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
return k, o, q, v
def lowerCAmelCase ( snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Optional[Any]=False )-> List[Any]:
if split_mlp_wi:
A_ = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
A_ = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
A_ = (wi_a, wi_a)
else:
A_ = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
A_ = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
return wi, wo
def lowerCAmelCase ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Any , snake_case__ : Optional[int] )-> int:
return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def lowerCAmelCase ( snake_case__ : dict , *, snake_case__ : int , snake_case__ : bool )-> Union[str, Any]:
A_ = traverse_util.flatten_dict(variables["target"] )
A_ = {"/".join(snake_case__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
A_ = "encoder/layers_0/mlp/wi_0/kernel" in old
print("Split MLP:" , snake_case__ )
A_ = collections.OrderedDict()
# Shared embeddings.
A_ = old["token_embedder/embedding"]
# Encoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
A_ = tax_layer_norm_lookup(snake_case__ , snake_case__ , "encoder" , "pre_attention_layer_norm" )
A_ , A_ , A_ , A_ = tax_attention_lookup(snake_case__ , snake_case__ , "encoder" , "attention" )
A_ = layer_norm
A_ = k.T
A_ = o.T
A_ = q.T
A_ = v.T
# Block i, layer 1 (MLP).
A_ = tax_layer_norm_lookup(snake_case__ , snake_case__ , "encoder" , "pre_mlp_layer_norm" )
A_ , A_ = tax_mlp_lookup(snake_case__ , snake_case__ , "encoder" , snake_case__ )
A_ = layer_norm
if split_mlp_wi:
A_ = wi[0].T
A_ = wi[1].T
else:
A_ = wi.T
A_ = wo.T
A_ = old[
"encoder/relpos_bias/rel_embedding"
].T
A_ = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
A_ = tax_layer_norm_lookup(snake_case__ , snake_case__ , "decoder" , "pre_self_attention_layer_norm" )
A_ , A_ , A_ , A_ = tax_attention_lookup(snake_case__ , snake_case__ , "decoder" , "self_attention" )
A_ = layer_norm
A_ = k.T
A_ = o.T
A_ = q.T
A_ = v.T
# Block i, layer 1 (Cross Attention).
A_ = tax_layer_norm_lookup(snake_case__ , snake_case__ , "decoder" , "pre_cross_attention_layer_norm" )
A_ , A_ , A_ , A_ = tax_attention_lookup(snake_case__ , snake_case__ , "decoder" , "encoder_decoder_attention" )
A_ = layer_norm
A_ = k.T
A_ = o.T
A_ = q.T
A_ = v.T
# Block i, layer 2 (MLP).
A_ = tax_layer_norm_lookup(snake_case__ , snake_case__ , "decoder" , "pre_mlp_layer_norm" )
A_ , A_ = tax_mlp_lookup(snake_case__ , snake_case__ , "decoder" , snake_case__ )
A_ = layer_norm
if split_mlp_wi:
A_ = wi[0].T
A_ = wi[1].T
else:
A_ = wi.T
A_ = wo.T
A_ = old["decoder/decoder_norm/scale"]
A_ = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
A_ = old["decoder/logits_dense/kernel"].T
return new
def lowerCAmelCase ( snake_case__ : int , snake_case__ : bool )-> Any:
A_ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
A_ = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
A_ = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
A_ = state_dict["shared.weight"]
return state_dict
def lowerCAmelCase ( snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : int )-> List[Any]:
A_ = checkpoints.load_tax_checkpoint(snake_case__ )
A_ = convert_tax_to_pytorch(snake_case__ , num_layers=config.num_layers , is_encoder_only=snake_case__ )
A_ = make_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ , strict=snake_case__ )
def lowerCAmelCase ( snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : bool = False )-> Optional[int]:
A_ = TaConfig.from_json_file(snake_case__ )
print(f'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
A_ = TaEncoderModel(snake_case__ )
else:
A_ = TaForConditionalGeneration(snake_case__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(snake_case__ )
# Verify that we can load the checkpoint.
model.from_pretrained(snake_case__ )
print("Done" )
if __name__ == "__main__":
__magic_name__ : Optional[int] = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
__magic_name__ : str = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
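# --- hedged usage note, not part of the dataset row above ---
# Assuming the argparse flags defined above, a conversion run would look like
# this (the script name and paths are placeholders, not taken from the row):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --is_encoder_only   # optional; omit for encoder-decoder checkpoints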
| 712 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__magic_name__ : Optional[Any] = logging.get_logger(__name__)
__magic_name__ : Tuple = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowerCamelCase ( __snake_case ):
"""simple docstring"""
lowerCAmelCase_ = """umt5"""
lowerCAmelCase_ = ["""past_key_values"""]
def __init__( self , __UpperCamelCase=250112 , __UpperCamelCase=512 , __UpperCamelCase=64 , __UpperCamelCase=1024 , __UpperCamelCase=8 , __UpperCamelCase=None , __UpperCamelCase=6 , __UpperCamelCase=32 , __UpperCamelCase=128 , __UpperCamelCase=0.1 , __UpperCamelCase=1E-6 , __UpperCamelCase=1.0 , __UpperCamelCase="gated-gelu" , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase="T5Tokenizer" , __UpperCamelCase=True , __UpperCamelCase=0 , __UpperCamelCase=1 , __UpperCamelCase=0 , **__UpperCamelCase , ):
super().__init__(
is_encoder_decoder=__UpperCamelCase , tokenizer_class=__UpperCamelCase , tie_word_embeddings=__UpperCamelCase , pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , **__UpperCamelCase , )
A_ = vocab_size
A_ = d_model
A_ = d_kv
A_ = d_ff
A_ = num_layers
A_ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A_ = num_heads
A_ = relative_attention_num_buckets
A_ = relative_attention_max_distance
A_ = dropout_rate
A_ = layer_norm_epsilon
A_ = initializer_factor
A_ = feed_forward_proj
A_ = use_cache
A_ = self.feed_forward_proj.split("-" )
A_ = act_info[-1]
A_ = act_info[0] == "gated"
if len(__UpperCamelCase ) > 1 and act_info[0] != "gated" or len(__UpperCamelCase ) > 2:
raise ValueError(
f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
if feed_forward_proj == "gated-gelu":
A_ = "gelu_new"
@property
def lowercase_ ( self ):
return self.d_model
@property
def lowercase_ ( self ):
return self.num_heads
@property
def lowercase_ ( self ):
return self.num_layers
class lowerCamelCase ( __snake_case ):
"""simple docstring"""
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def lowercase_ ( self ):
A_ = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
A_ = "past_encoder_sequence + sequence"
A_ = {0: "batch"}
A_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
A_ = {0: "batch", 1: "decoder_sequence"}
A_ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def lowercase_ ( self ):
return 13
@property
def lowercase_ ( self ):
return 5E-4
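# --- hedged illustration, not part of the dataset row above ---
# A self-contained restatement of the `feed_forward_proj` parsing in the
# constructor above (validation of malformed strings omitted); the name below
# is hypothetical.
def _demo_parse_feed_forward_proj(feed_forward_proj: str):
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"  # special-cased above as well
    return dense_act_fn, is_gated_act

# _demo_parse_feed_forward_proj("gated-gelu") == ("gelu_new", True)
# _demo_parse_feed_forward_proj("relu")       == ("relu", False)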
| 608 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __magic_name__ ( __UpperCAmelCase="" ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
return os.path.join(__UpperCAmelCase , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.rand(12 ,dtype=torch.floataa ) - 0.5
__SCREAMING_SNAKE_CASE = AgentAudio(lowerCamelCase )
__SCREAMING_SNAKE_CASE = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase ,agent_type.to_raw() ,atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCamelCase ) )
# Ensure that the file contains the same value as the original tensor
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(lowerCamelCase )
self.assertTrue(torch.allclose(lowerCamelCase ,torch.tensor(lowerCamelCase ) ,atol=1E-4 ) )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.rand(12 ,dtype=torch.floataa ) - 0.5
__SCREAMING_SNAKE_CASE = get_new_path(suffix=""".wav""" )
sf.write(lowerCamelCase ,lowerCamelCase ,1_6000 )
__SCREAMING_SNAKE_CASE = AgentAudio(lowerCamelCase )
self.assertTrue(torch.allclose(lowerCamelCase ,agent_type.to_raw() ,atol=1E-4 ) )
self.assertEqual(agent_type.to_string() ,lowerCamelCase )
@require_vision
@require_torch
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.randint(0 ,256 ,(64, 64, 3) )
__SCREAMING_SNAKE_CASE = AgentImage(lowerCamelCase )
__SCREAMING_SNAKE_CASE = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase ,agent_type._tensor ,atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() ,Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase ) )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
__SCREAMING_SNAKE_CASE = Image.open(lowerCamelCase )
__SCREAMING_SNAKE_CASE = AgentImage(lowerCamelCase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase ) )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
__SCREAMING_SNAKE_CASE = Image.open(lowerCamelCase )
__SCREAMING_SNAKE_CASE = AgentImage(lowerCamelCase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase ) )
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """Hey!"""
__SCREAMING_SNAKE_CASE = AgentText(lowerCamelCase )
self.assertEqual(lowerCamelCase ,agent_type.to_string() )
self.assertEqual(lowerCamelCase ,agent_type.to_raw() )
self.assertEqual(lowerCamelCase ,lowerCamelCase )
| 109 | """simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
a =None
a =logging.get_logger(__name__)
a ={'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a ={
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
a ={
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
a ='▁'
class __UpperCAmelCase ( __lowerCAmelCase ):
A__ : Dict = VOCAB_FILES_NAMES
A__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Union[str, Any] = AlbertTokenizer
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="[CLS]" , _lowerCamelCase="[SEP]" , _lowerCamelCase="<unk>" , _lowerCamelCase="[SEP]" , _lowerCamelCase="<pad>" , _lowerCamelCase="[CLS]" , _lowerCamelCase="[MASK]" , **_lowerCamelCase , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowerCamelCase__ =(
AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase , normalized=_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase )
else mask_token
)
super().__init__(
_lowerCamelCase , tokenizer_file=_lowerCamelCase , do_lower_case=_lowerCamelCase , remove_space=_lowerCamelCase , keep_accents=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , **_lowerCamelCase , )
lowerCamelCase__ =do_lower_case
lowerCamelCase__ =remove_space
lowerCamelCase__ =keep_accents
lowerCamelCase__ =vocab_file
lowerCamelCase__ =False if not self.vocab_file else True
def _a ( self , _lowerCamelCase , _lowerCamelCase = None ):
lowerCamelCase__ =[self.sep_token_id]
lowerCamelCase__ =[self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self , _lowerCamelCase , _lowerCamelCase = None ):
lowerCamelCase__ =[self.sep_token_id]
lowerCamelCase__ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self , _lowerCamelCase , _lowerCamelCase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(_lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ =os.path.join(
_lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ):
copyfile(self.vocab_file , _lowerCamelCase )
return (out_vocab_file,)
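# --- hedged illustration, not part of the dataset row above ---
# The two pair-handling methods above implement ALBERT's sequence layout:
#
#   single sequence:  [CLS] A [SEP]            token_type_ids: 0 ... 0
#   sequence pair:    [CLS] A [SEP] B [SEP]    token_type_ids: 0 ... 0 1 ... 1
#
# i.e. everything up to and including the first [SEP] gets segment id 0, and
# the second sequence plus its closing [SEP] gets segment id 1.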
| 530 | 0 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class snake_case__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Dict=7 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[str]=99 , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : Optional[Any]=37 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Union[str, Any]=5_12 , lowerCAmelCase_ : List[str]=16 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Union[str, Any]=4 , ) -> List[Any]:
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_attention_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_choices
def UpperCamelCase ( self : Dict ) -> Tuple:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_attention_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def UpperCamelCase ( self : Dict ) -> List[str]:
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = True
UpperCAmelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class snake_case__ ( __snake_case , unittest.TestCase ):
'''simple docstring'''
__A = True
__A = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase ( self : Optional[Any] ) -> str:
UpperCAmelCase_ = FlaxBertModelTester(self )
@slow
def UpperCamelCase ( self : int ) -> Union[str, Any]:
# Only check this for base model, not necessary for all model classes.
# This will also help speed-up tests.
UpperCAmelCase_ = FlaxBertModel.from_pretrained('''bert-base-cased''' )
UpperCAmelCase_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase_ )
| 407 |
import re
from filelock import FileLock
try:
import nltk
_lowerCamelCase : Any = True
except (ImportError, ModuleNotFoundError):
_lowerCamelCase : Tuple = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def _lowerCAmelCase ( __magic_name__ :str ):
re.sub('''<n>''' , '''''' , __magic_name__ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__magic_name__ ) )
| 407 | 1 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class A ( UpperCAmelCase_ , UpperCAmelCase_ ):
@register_to_config
def __init__( self : Union[str, Any] , __a : Any = 7_6_8 , ) -> Any:
super().__init__()
__UpperCAmelCase = nn.Parameter(torch.zeros(1 , UpperCamelCase__ ) )
__UpperCAmelCase = nn.Parameter(torch.ones(1 , UpperCamelCase__ ) )
def snake_case__ ( self : Dict , __a : Optional[int] = None , __a : Any = None , ) -> List[str]:
__UpperCAmelCase = nn.Parameter(self.mean.to(UpperCamelCase__ ).to(UpperCamelCase__ ) )
__UpperCAmelCase = nn.Parameter(self.std.to(UpperCamelCase__ ).to(UpperCamelCase__ ) )
return self
def snake_case__ ( self : Union[str, Any] , __a : Tuple ) -> List[str]:
__UpperCAmelCase = (embeds - self.mean) * 1.0 / self.std
return embeds
def snake_case__ ( self : int , __a : Optional[Any] ) -> Tuple:
__UpperCAmelCase = (embeds * self.std) + self.mean
return embeds
| 262 |
"""simple docstring"""
def __a ( A = 10 ) -> str:
'''simple docstring'''
if not isinstance(A , A ) or n < 0:
raise ValueError("Invalid input" )
A__ = 10**n
A__ = 28_433 * (pow(2 , 7_830_457 , A )) + 1
return str(number % modulus )
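# --- hedged illustration, not part of the original row ---
# The three-argument pow above is what keeps this tractable: it computes
# 2**7_830_457 modulo 10**n directly, so only the last n digits are ever held
# in memory. The same idea on small numbers:
#
#   pow(2, 20, 10**5) == (2**20) % 10**5 == 48_576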
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(10) = }''') | 337 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __a ( __A , __A ):
'''simple docstring'''
@register_to_config
def __init__( self , UpperCamelCase__ = 768 , ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Dict = nn.Parameter(torch.zeros(1 , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.Parameter(torch.ones(1 , UpperCamelCase__ ) )
def __snake_case ( self , UpperCamelCase__ = None , UpperCamelCase__ = None , ):
SCREAMING_SNAKE_CASE_ : Optional[int] = nn.Parameter(self.mean.to(UpperCamelCase__ ).to(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE_ : Tuple = nn.Parameter(self.std.to(UpperCamelCase__ ).to(UpperCamelCase__ ) )
return self
def __snake_case ( self , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (embeds - self.mean) * 1.0 / self.std
return embeds
def __snake_case ( self , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = (embeds * self.std) + self.mean
return embeds | 720 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __a ( __A ):
'''simple docstring'''
def __init__( self , *UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ):
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = eval_examples
SCREAMING_SNAKE_CASE_ : int = post_process_function
def __snake_case ( self , UpperCamelCase__ = None , UpperCamelCase__=None , UpperCamelCase__ = None , UpperCamelCase__ = "eval" , **UpperCamelCase__ , ):
SCREAMING_SNAKE_CASE_ : int = gen_kwargs.copy()
SCREAMING_SNAKE_CASE_ : Tuple = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = gen_kwargs
SCREAMING_SNAKE_CASE_ : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE_ : int = self.get_eval_dataloader(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Any = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.compute_metrics
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = time.time()
SCREAMING_SNAKE_CASE_ : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE_ : List[Any] = eval_loop(
UpperCamelCase__ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , metric_key_prefix=UpperCamelCase__ , )
finally:
SCREAMING_SNAKE_CASE_ : Optional[int] = compute_metrics
SCREAMING_SNAKE_CASE_ : Dict = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
UpperCamelCase__ , UpperCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
SCREAMING_SNAKE_CASE_ : List[str] = self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.compute_metrics(UpperCamelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = metrics.pop(UpperCamelCase__ )
metrics.update(output.metrics )
else:
SCREAMING_SNAKE_CASE_ : Tuple = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE_ : Dict = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase__ )
return metrics
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__ = "test" , **UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = gen_kwargs.copy()
SCREAMING_SNAKE_CASE_ : Any = self.get_test_dataloader(UpperCamelCase__ )
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE_ : Tuple = self.compute_metrics
SCREAMING_SNAKE_CASE_ : str = None
SCREAMING_SNAKE_CASE_ : Optional[Any] = time.time()
SCREAMING_SNAKE_CASE_ : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE_ : List[str] = eval_loop(
UpperCamelCase__ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , metric_key_prefix=UpperCamelCase__ , )
finally:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = compute_metrics
SCREAMING_SNAKE_CASE_ : List[str] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
UpperCamelCase__ , UpperCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE_ : Optional[int] = self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , 'predict' )
SCREAMING_SNAKE_CASE_ : List[Any] = self.compute_metrics(UpperCamelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = metrics.pop(UpperCamelCase__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase__ ) | 97 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A (__magic_name__ ):
snake_case :Optional[int] = ["image_processor", "tokenizer"]
snake_case :List[str] = "BlipImageProcessor"
snake_case :Optional[Any] = "AutoTokenizer"
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : List[Any] = False
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : List[Any] = self.image_processor
def __call__( self , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = 0 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = True , UpperCamelCase_ = None , **UpperCamelCase_ , ):
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
__UpperCAmelCase : Tuple = self.tokenizer
__UpperCAmelCase : Any = self.tokenizer(
text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , )
return text_encoding
# add pixel_values
__UpperCAmelCase : int = self.image_processor(UpperCamelCase_ , return_tensors=UpperCamelCase_ )
if text is not None:
__UpperCAmelCase : Dict = self.tokenizer(
text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , )
else:
__UpperCAmelCase : Optional[Any] = None
if text_encoding is not None:
encoding_image_processor.update(UpperCamelCase_ )
return encoding_image_processor
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _snake_case ( self ):
__UpperCAmelCase : str = self.tokenizer.model_input_names
__UpperCAmelCase : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 168 | '''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __A (__magic_name__ ):
def __get__( self , UpperCamelCase_ , UpperCamelCase_=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute" )
__UpperCAmelCase : List[str] = "__cached_" + self.fget.__name__
__UpperCAmelCase : Optional[int] = getattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if cached is None:
__UpperCAmelCase : List[str] = self.fget(UpperCamelCase_ )
setattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return cached
def _lowercase ( lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f"""invalid truth value {val!r}""" )
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
if is_torch_fx_proxy(lowerCamelCase__ ):
return True
if is_torch_available():
import torch
if isinstance(lowerCamelCase__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCamelCase__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCamelCase__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCamelCase__ , np.ndarray )
def _lowercase ( lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
return isinstance(lowerCamelCase__ , np.ndarray )
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
return _is_numpy(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
import torch
return isinstance(lowerCamelCase__ , torch.Tensor )
def _lowercase ( lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
return False if not is_torch_available() else _is_torch(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
import torch
return isinstance(lowerCamelCase__ , torch.device )
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
return False if not is_torch_available() else _is_torch_device(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
import torch
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , lowerCamelCase__ ):
__UpperCAmelCase : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ )
else:
return False
return isinstance(lowerCamelCase__ , torch.dtype )
def _lowercase ( lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
return False if not is_torch_available() else _is_torch_dtype(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
import tensorflow as tf
return isinstance(lowerCamelCase__ , tf.Tensor )
def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
return False if not is_tf_available() else _is_tensorflow(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowerCamelCase__ , "is_symbolic_tensor" ):
return tf.is_symbolic_tensor(lowerCamelCase__ )
return type(lowerCamelCase__ ) == tf.Tensor
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCamelCase__ , jnp.ndarray )
def _lowercase ( lowerCamelCase__ ) -> Dict:
"""simple docstring"""
return False if not is_flax_available() else _is_jax(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
if isinstance(lowerCamelCase__ , (dict, UserDict) ):
return {k: to_py_obj(lowerCamelCase__ ) for k, v in obj.items()}
elif isinstance(lowerCamelCase__ , (list, tuple) ):
return [to_py_obj(lowerCamelCase__ ) for o in obj]
elif is_tf_tensor(lowerCamelCase__ ):
return obj.numpy().tolist()
elif is_torch_tensor(lowerCamelCase__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowerCamelCase__ ):
return np.asarray(lowerCamelCase__ ).tolist()
elif isinstance(lowerCamelCase__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
if isinstance(lowerCamelCase__ , (dict, UserDict) ):
return {k: to_numpy(lowerCamelCase__ ) for k, v in obj.items()}
elif isinstance(lowerCamelCase__ , (list, tuple) ):
return np.array(lowerCamelCase__ )
elif is_tf_tensor(lowerCamelCase__ ):
return obj.numpy()
elif is_torch_tensor(lowerCamelCase__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowerCamelCase__ ):
return np.asarray(lowerCamelCase__ )
else:
return obj
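# --- hedged illustration, not part of the surrounding module ---
# Assuming the original (pre-mangling) names `to_py_obj` / `to_numpy`, both
# helpers above walk dicts, lists and tuples recursively and normalise the
# leaves, e.g.:
#
#   to_py_obj({"ids": np.array([[1, 2]])}) -> {"ids": [[1, 2]]}
#   to_numpy([1, 2, 3])                    -> np.array([1, 2, 3])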
class __A (__magic_name__ ):
def _snake_case ( self ):
__UpperCAmelCase : Any = fields(self )
# Safety and consistency checks
if not len(UpperCamelCase_ ):
raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
__UpperCAmelCase : Dict = getattr(self , class_fields[0].name )
__UpperCAmelCase : Union[str, Any] = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(UpperCamelCase_ ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : str = first_field.items()
__UpperCAmelCase : Union[str, Any] = True
else:
try:
__UpperCAmelCase : Optional[int] = iter(UpperCamelCase_ )
__UpperCAmelCase : Dict = True
except TypeError:
__UpperCAmelCase : Union[str, Any] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(UpperCamelCase_ ):
if (
not isinstance(UpperCamelCase_ , (list, tuple) )
or not len(UpperCamelCase_ ) == 2
or not isinstance(element[0] , UpperCamelCase_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
__UpperCAmelCase : Union[str, Any] = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
__UpperCAmelCase : List[str] = element[1]
elif first_field is not None:
__UpperCAmelCase : Optional[int] = first_field
else:
for field in class_fields:
__UpperCAmelCase : Any = getattr(self , field.name )
if v is not None:
__UpperCAmelCase : Union[str, Any] = v
def __delitem__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self , UpperCamelCase_ ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self , UpperCamelCase_ , UpperCamelCase_ ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(UpperCamelCase_ , UpperCamelCase_ )
super().__setattr__(UpperCamelCase_ , UpperCamelCase_ )
def __setitem__( self , UpperCamelCase_ , UpperCamelCase_ ):
# Will raise a KeyException if needed
super().__setitem__(UpperCamelCase_ , UpperCamelCase_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
return tuple(self[k] for k in self.keys() )
class __A (__magic_name__ , __magic_name__ ):
@classmethod
def _snake_case ( cls , UpperCamelCase_ ):
raise ValueError(
f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class __A (__magic_name__ ):
snake_case :Dict = "longest"
snake_case :Dict = "max_length"
snake_case :Union[str, Any] = "do_not_pad"
class __A (__magic_name__ ):
snake_case :Union[str, Any] = "pt"
snake_case :List[str] = "tf"
snake_case :Any = "np"
snake_case :Union[str, Any] = "jax"
class __A :
def __init__( self , UpperCamelCase_ ):
__UpperCAmelCase : Dict = context_managers
__UpperCAmelCase : str = ExitStack()
def __enter__( self ):
for context_manager in self.context_managers:
self.stack.enter_context(UpperCamelCase_ )
def __exit__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
self.stack.__exit__(*UpperCamelCase_ , **UpperCamelCase_ )
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = infer_framework(lowerCamelCase__ )
if framework == "tf":
__UpperCAmelCase : Any = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__UpperCAmelCase : List[str] = inspect.signature(model_class.forward ) # PyTorch models
else:
__UpperCAmelCase : List[str] = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = model_class.__name__
__UpperCAmelCase : List[str] = infer_framework(lowerCamelCase__ )
if framework == "tf":
__UpperCAmelCase : Optional[Any] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__UpperCAmelCase : Tuple = inspect.signature(model_class.forward ) # PyTorch models
else:
__UpperCAmelCase : List[Any] = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ = "" , lowerCamelCase__ = "." ) -> Optional[Any]:
"""simple docstring"""
def _flatten_dict(lowerCamelCase__ , lowerCamelCase__="" , lowerCamelCase__="." ):
for k, v in d.items():
__UpperCAmelCase : Union[str, Any] = str(lowerCamelCase__ ) + delimiter + str(lowerCamelCase__ ) if parent_key else k
if v and isinstance(lowerCamelCase__ , lowerCamelCase__ ):
yield from flatten_dict(lowerCamelCase__ , lowerCamelCase__ , delimiter=lowerCamelCase__ ).items()
else:
yield key, v
return dict(_flatten_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) )
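# --- hedged illustration, not part of the surrounding module ---
# Assuming the original (pre-mangling) name `flatten_dict`, nested mappings
# are collapsed with the delimiter joining the key levels, e.g.:
#
#   flatten_dict({"a": {"b": 1, "c": {"d": 2}}})
#   -> {"a.b": 1, "a.c.d": 2}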
@contextmanager
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ = False ) -> Union[str, Any]:
"""simple docstring"""
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def _lowercase ( lowerCamelCase__ , lowerCamelCase__=None ) -> str:
"""simple docstring"""
if is_numpy_array(lowerCamelCase__ ):
return np.transpose(lowerCamelCase__ , axes=lowerCamelCase__ )
elif is_torch_tensor(lowerCamelCase__ ):
return array.T if axes is None else array.permute(*lowerCamelCase__ )
elif is_tf_tensor(lowerCamelCase__ ):
import tensorflow as tf
return tf.transpose(lowerCamelCase__ , perm=lowerCamelCase__ )
elif is_jax_tensor(lowerCamelCase__ ):
return jnp.transpose(lowerCamelCase__ , axes=lowerCamelCase__ )
else:
raise ValueError(f"""Type not supported for transpose: {type(lowerCamelCase__ )}.""" )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
if is_numpy_array(lowerCamelCase__ ):
return np.reshape(lowerCamelCase__ , lowerCamelCase__ )
elif is_torch_tensor(lowerCamelCase__ ):
return array.reshape(*lowerCamelCase__ )
elif is_tf_tensor(lowerCamelCase__ ):
import tensorflow as tf
return tf.reshape(lowerCamelCase__ , lowerCamelCase__ )
elif is_jax_tensor(lowerCamelCase__ ):
return jnp.reshape(lowerCamelCase__ , lowerCamelCase__ )
else:
raise ValueError(f"""Type not supported for reshape: {type(lowerCamelCase__ )}.""" )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__=None ) -> Optional[int]:
"""simple docstring"""
if is_numpy_array(lowerCamelCase__ ):
return np.squeeze(lowerCamelCase__ , axis=lowerCamelCase__ )
elif is_torch_tensor(lowerCamelCase__ ):
return array.squeeze() if axis is None else array.squeeze(dim=lowerCamelCase__ )
elif is_tf_tensor(lowerCamelCase__ ):
import tensorflow as tf
return tf.squeeze(lowerCamelCase__ , axis=lowerCamelCase__ )
elif is_jax_tensor(lowerCamelCase__ ):
return jnp.squeeze(lowerCamelCase__ , axis=lowerCamelCase__ )
else:
raise ValueError(f"""Type not supported for squeeze: {type(lowerCamelCase__ )}.""" )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
if is_numpy_array(lowerCamelCase__ ):
return np.expand_dims(lowerCamelCase__ , lowerCamelCase__ )
elif is_torch_tensor(lowerCamelCase__ ):
return array.unsqueeze(dim=lowerCamelCase__ )
elif is_tf_tensor(lowerCamelCase__ ):
import tensorflow as tf
return tf.expand_dims(lowerCamelCase__ , axis=lowerCamelCase__ )
elif is_jax_tensor(lowerCamelCase__ ):
return jnp.expand_dims(lowerCamelCase__ , axis=lowerCamelCase__ )
else:
raise ValueError(f"""Type not supported for expand_dims: {type(lowerCamelCase__ )}.""" )
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
if is_numpy_array(lowerCamelCase__ ):
return np.size(lowerCamelCase__ )
elif is_torch_tensor(lowerCamelCase__ ):
return array.numel()
elif is_tf_tensor(lowerCamelCase__ ):
import tensorflow as tf
return tf.size(lowerCamelCase__ )
elif is_jax_tensor(lowerCamelCase__ ):
return array.size
else:
raise ValueError(f"""Type not supported for expand_dims: {type(lowerCamelCase__ )}.""" )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Any:
"""simple docstring"""
for key, value in auto_map.items():
if isinstance(lowerCamelCase__ , (tuple, list) ):
__UpperCAmelCase : List[str] = [f"""{repo_id}--{v}""" if (v is not None and "--" not in v) else v for v in value]
elif value is not None and "--" not in value:
__UpperCAmelCase : int = f"""{repo_id}--{value}"""
return auto_map
def _lowercase ( lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
for base_class in inspect.getmro(lowerCamelCase__ ):
__UpperCAmelCase : Tuple = base_class.__module__
__UpperCAmelCase : Union[str, Any] = base_class.__name__
if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("torch" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f"""Could not infer framework from class {model_class}.""" )
| 168 | 1 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
lowerCAmelCase__ = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def _lowerCamelCase ( __a, __a ):
'''simple docstring'''
warnings.warn(__a, __a )
requires_backends(__a, '''sklearn''' )
return (preds == labels).mean()
def _lowerCamelCase ( __a, __a ):
'''simple docstring'''
warnings.warn(__a, __a )
requires_backends(__a, '''sklearn''' )
SCREAMING_SNAKE_CASE_ = simple_accuracy(__a, __a )
SCREAMING_SNAKE_CASE_ = fa_score(y_true=__a, y_pred=__a )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def pearson_and_spearman(preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, '''sklearn''')
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, '''sklearn''')
    assert len(preds) == len(labels), F'Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}'
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_fa(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_fa(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, '''sklearn''')
    if len(preds) != len(labels):
        raise ValueError(F'Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}')
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name) | 711 |
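# Editor's note: a hedged usage sketch for the metric helpers above. Only NumPy is needed
# here; simple_accuracy itself additionally requires scikit-learn via `requires_backends`.
import numpy as np

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
# simple_accuracy reduces to the mean of elementwise equality:
assert (preds == labels).mean() == 0.75
# glue_compute_metrics("sst-2", preds, labels) would wrap the same value as {"acc": 0.75}.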
"""simple docstring"""
def catalan_number(number):
    if not isinstance(number, int):
        msg = F'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 1:
        msg = F'Input value of [number={number}] must be > 0'
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod() | 628 | 0 |
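# Editor's note: quick sanity check (added by the editor, not from the source) for the
# Catalan-number routine above: under C_i = C_{i-1} * (4*i - 2) // (i + 1), the values
# returned for number = 1..5 are the Catalan numbers C_0..C_4.
assert [catalan_number(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]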
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests( FlaxModelTesterMixin , unittest.TestCase ):
    model_class = FlaxAutoencoderKL
@property
    def dummy_input( self ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
return init_dict, inputs_dict
| 66 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 461 | 0 |
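# Editor's note: a minimal, self-contained sketch (assumed, not the library's _LazyModule)
# of the lazy-import pattern used by the two __init__ snippets around here: exported names
# are resolved on first attribute access instead of eagerly at import time.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name back to the module that provides it
        self._name_to_module = {n: mod for mod, names in import_structure.items() for n in names}

    def __getattr__(self, name):
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)


lazy = _LazyModuleSketch("demo", {"json": ["dumps", "loads"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'  # json is only imported at this point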
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 |
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(numsa: list[float], numsb: list[float]) -> float:
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_b)}''') | 621 | 1 |
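# Editor's note: worked checks (added by the editor) for median_of_two_arrays above; the
# median is taken over the concatenation of the two inputs, which need not be pre-sorted.
assert median_of_two_arrays([1, 3], [2]) == 2  # odd total count -> middle element
assert median_of_two_arrays([1, 2], [4, 3]) == 2.5  # even total count -> mean of the two middle elements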
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 290 |
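# Editor's note: a minimal TensorFlow sketch (assumed TF 2.x API) of what the
# GradientAccumulator under test does: sum per-micro-batch gradients in a variable and
# apply them once, so the effective batch size grows without holding extra activations.
import tensorflow as tf

acc = tf.Variable([0.0, 0.0])  # running gradient sum
for grad in ([1.0, 2.0], [-2.0, 1.0], [-1.0, 2.0]):
    acc.assign_add(grad)  # accumulate one micro-batch gradient
assert acc.numpy().tolist() == [-2.0, 5.0]  # same expectation as in the test above
acc.assign([0.0, 0.0])  # reset, mirroring accumulator.reset()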
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """simple docstring"""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """simple docstring"""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 290 | 1 |
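# Editor's note: worked example (added by the editor) for the snippet above. 220 and 284
# form the classic amicable pair, so each passes the filter
#     sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
# and together they contribute 220 + 284 = 504 to solution().
assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220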
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])
    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")
    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
    # print & log results
    result_str = f"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str)
    with open(f"""{dataset_id}_eval_results.txt""", "w") as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"""log_{dataset_id}_predictions.txt"""
        target_file = f"""log_{dataset_id}_targets.txt"""
        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"""{i}""" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"""{i}""" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
lowerCamelCase = parser.parse_args()
main(args)
| 702 |
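# Editor's note: a small, self-contained illustration (using a subset of the script's
# ignore-list, chosen here for brevity) of what normalize_text above does: lowercase,
# strip punctuation, and collapse newlines and runs of spaces into single spaces.
import re

text = re.sub(r"[,?.!\-;:]", "", "Hello, WORLD!\n\nOK".lower())
for t in ["\n\n", "\n", "  "]:
    text = " ".join(text.split(t))
assert text == "hello world ok"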
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCamelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
lowerCamelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 14 | 0 |
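# Editor's note: a toy, framework-free sketch (assumed, simplified from the script above) of
# the key-renaming idea driving the conversion: fairseq parameter names are matched against
# a substring table, with "*" standing in for the layer index recovered from the name.
MAPPING_SKETCH = {"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q"}


def rename(fairseq_name: str) -> str:
    for key, mapped in MAPPING_SKETCH.items():
        if key in fairseq_name:
            layer_index = fairseq_name.split(key)[0].split(".")[-2]  # the "3" in "...layers.3...."
            return mapped.replace("*", layer_index)
    return fairseq_name


assert rename("encoder.layers.3.self_attn.linear_q.weight") == "encoder.layers.3.self_attn.linear_q"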
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
def next_term(a_i, k, i, n):
    # a(i) = b * 10^k + c; ds_b is digitsum(b) and c is the value of the low digits
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        # carry the addend through the low digits, recomputing digitsum(c)
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    # adds addend into the digit array `digits`, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 326 |
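# Editor's note: a brute-force reference (added by the editor) for the sequence the memoized
# solver above accelerates: a(1) = 1 and a(n + 1) = a(n) + digitsum(a(n)). The memoized
# version jumps many terms at once by caching how the low digits of a(n) evolve; a slow
# cross-check such as naive(10**6) == solution(10**6) is feasible but takes a while.
def naive(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a


assert [naive(k) for k in range(1, 7)] == [1, 2, 4, 8, 16, 23]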
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config( self ):
        '''simple docstring'''
        return CTRLConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
    def create_and_check_ctrl_model( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ):
        '''simple docstring'''
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()
        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ):
        '''simple docstring'''
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification( self, config, input_ids, head_mask, token_type_ids, *args ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': CTRLModel,
            'text-classification': CTRLForSequenceClassification,
            'text-generation': CTRLLMHeadModel,
            'zero-shot': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
        '''simple docstring'''
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_ctrl_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
    def test_ctrl_lm_head_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
@require_torch
class CTRLModelLanguageGenerationTest( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl( self ):
        '''simple docstring'''
        model = CTRLLMHeadModel.from_pretrained("""ctrl""")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device)  # Legal the president is
        expected_output_ids = [
            11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 109 | 0 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __SCREAMING_SNAKE_CASE:
def __init__( self: Optional[int] , UpperCamelCase: List[str] , UpperCamelCase: int = 13 , UpperCamelCase: int = 64 , UpperCamelCase: int = 2 , UpperCamelCase: int = 3 , UpperCamelCase: int = 3 , UpperCamelCase: bool = True , UpperCamelCase: bool = True , UpperCamelCase: int = 1_28 , UpperCamelCase: Optional[Any]=[16, 32, 64, 1_28] , UpperCamelCase: int = 7 , UpperCamelCase: int = 4 , UpperCamelCase: int = 37 , UpperCamelCase: str = "gelu" , UpperCamelCase: float = 0.1 , UpperCamelCase: float = 0.1 , UpperCamelCase: int = 10 , UpperCamelCase: float = 0.02 , UpperCamelCase: int = 2 , UpperCamelCase: int = 1 , UpperCamelCase: int = 1_28 , UpperCamelCase: List[int] = [2, 2, 2, 2] , UpperCamelCase: int = 2 , UpperCamelCase: int = 2 , ) -> Dict:
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = image_size
snake_case__ = patch_size
snake_case__ = num_channels
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = type_sequence_label_size
snake_case__ = initializer_range
snake_case__ = encoder_stride
snake_case__ = num_attention_outputs
snake_case__ = embed_dim
snake_case__ = embed_dim + 1
snake_case__ = resolution
snake_case__ = depths
snake_case__ = hidden_sizes
snake_case__ = dim
snake_case__ = mlp_expansion_ratio
def lowerCAmelCase_ ( self: Dict ) -> Tuple:
snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self: int ) -> Optional[int]:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowerCAmelCase_ ( self: Dict , UpperCamelCase: int , UpperCamelCase: Dict , UpperCamelCase: Union[str, Any] ) -> int:
snake_case__ = TFEfficientFormerModel(config=UpperCamelCase )
snake_case__ = model(UpperCamelCase , training=UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Tuple , UpperCamelCase: int , UpperCamelCase: Tuple ) -> List[Any]:
snake_case__ = self.type_sequence_label_size
snake_case__ = TFEfficientFormerForImageClassification(UpperCamelCase )
snake_case__ = model(UpperCamelCase , labels=UpperCamelCase , training=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ = 1
snake_case__ = TFEfficientFormerForImageClassification(UpperCamelCase )
snake_case__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[int]:
snake_case__ = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE( a_ , a_ , unittest.TestCase ):
_UpperCAmelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_UpperCAmelCase = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCAmelCase_ ( self: str ) -> Optional[int]:
snake_case__ = TFEfficientFormerModelTester(self )
snake_case__ = ConfigTester(
self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )
def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def lowerCAmelCase_ ( self: int ) -> str:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]:
pass
def lowerCAmelCase_ ( self: Tuple ) -> Optional[int]:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(UpperCamelCase )
snake_case__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def lowerCAmelCase_ ( self: Optional[int] ) -> Union[str, Any]:
def check_hidden_states_output(UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] ):
snake_case__ = model_class(UpperCamelCase )
snake_case__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase )
snake_case__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
if hasattr(self.model_tester , 'encoder_seq_length' ):
snake_case__ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
snake_case__ = seq_length * self.model_tester.chunk_length
else:
snake_case__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
snake_case__ = outputs.decoder_hidden_states
self.asseretIsInstance(UpperCamelCase , (list, tuple) )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
snake_case__ = getattr(self.model_tester , 'seq_length' , UpperCamelCase )
snake_case__ = getattr(self.model_tester , 'decoder_seq_length' , UpperCamelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def lowerCAmelCase_ ( self: Any , UpperCamelCase: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: List[Any]=False ) -> List[str]:
snake_case__ = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Union[str, Any]:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase )
def lowerCAmelCase_ ( self: Tuple ) -> Dict:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def lowerCAmelCase_ ( self: Dict ) -> Dict:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = TFEfficientFormerModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def lowerCAmelCase_ ( self: List[Any] ) -> Tuple:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = True
snake_case__ = getattr(self.model_tester , 'seq_length' , UpperCamelCase )
snake_case__ = getattr(self.model_tester , 'encoder_seq_length' , UpperCamelCase )
snake_case__ = getattr(self.model_tester , 'key_length' , UpperCamelCase )
snake_case__ = getattr(self.model_tester , 'chunk_length' , UpperCamelCase )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
snake_case__ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
snake_case__ = True
snake_case__ = False
snake_case__ = True
snake_case__ = model_class(UpperCamelCase )
snake_case__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase )
snake_case__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case__ = True
snake_case__ = model_class(UpperCamelCase )
snake_case__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase )
snake_case__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
snake_case__ = model_class(UpperCamelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
snake_case__ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCamelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
snake_case__ = model(UpperCamelCase )
self.assertTrue(outputs_dict is not None )
def a_ ( ) -> Dict:
"""simple docstring"""
snake_case__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self: Optional[int] ) -> str:
snake_case__ = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=UpperCamelCase , return_tensors='tf' )
# forward pass
snake_case__ = model(**UpperCamelCase , training=UpperCamelCase )
# verify the logits
snake_case__ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
snake_case__ = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
@slow
def lowerCAmelCase_ ( self: Tuple ) -> List[Any]:
snake_case__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=UpperCamelCase , return_tensors='tf' )
# forward pass
snake_case__ = model(**UpperCamelCase , training=UpperCamelCase )
# verify the logits
snake_case__ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
snake_case__ = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
| 372 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """simple docstring"""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """simple docstring"""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
__UpperCamelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0)
__UpperCamelCase : Dict = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 372 | 1 |
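# Editor's note: worked check (added by the editor) comparing the two evaluators above on
# p(x) = 3x^2 + 2x + 1 at x = 10. Horner's rule folds the polynomial into one multiply-add
# per coefficient and computes the same value: (3 * 10 + 2) * 10 + 1 = 321.
assert evaluate_poly((1, 2, 3), 10) == 321
assert horner((1, 2, 3), 10) == 321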
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = R"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `\" / \"`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `\" // \"`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `\"train\"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `\"compressed\"`)
The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and
`\"compressed\"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed", index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
        r"""Instantiate a [`RagConfig`] from a question encoder config and a generator config."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this instance to a Python dictionary, including the two nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
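
# A minimal usage sketch (illustrative; not part of the original module). A composite
# RagConfig is usually built from two sub-configs rather than instantiated directly;
# DPRConfig/BartConfig are just example choices of sub-configs here.
#
#   from transformers import BartConfig, DPRConfig, RagConfig
#
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       DPRConfig(), BartConfig(), n_docs=5, max_combined_length=300
#   )
#   rag_config.save_pretrained("./rag-config")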
| 554 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages of roughly 100 words."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
) -> None:
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage

    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
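
    # A sketch of how these artifacts are typically consumed afterwards (illustrative;
    # it mirrors the RAG "use your own knowledge dataset" example in transformers):
    #
    #   from transformers import RagRetriever, RagSequenceForGeneration, RagTokenizer
    #
    #   retriever = RagRetriever.from_pretrained(
    #       rag_example_args.rag_model_name, index_name="custom",
    #       passages_path=passages_path, index_path=index_path,
    #   )
    #   model = RagSequenceForGeneration.from_pretrained(rag_example_args.rag_model_name, retriever=retriever)
    #   tokenizer = RagTokenizer.from_pretrained(rag_example_args.rag_model_name)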
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 554 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    """Fraction of predictions that match the labels."""
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
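
# Illustrative invocation (task name and paths are placeholders; any task key from
# `processors` works):
#
#   python run_multiple_choice.py \
#     --task_name swag \
#     --model_name_or_path bert-base-uncased \
#     --data_dir ./data/swag \
#     --output_dir ./swag_out \
#     --do_train --do_eval \
#     --max_seq_length 80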
| 702 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """Interface that every `transformers-cli` subcommand implements."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
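
# A minimal concrete subclass sketch (illustrative only; the name `HelloCommand`
# and its behavior are invented for the example):
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           hello_parser = parser.add_parser("hello")
#           hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello from transformers-cli")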
| 553 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
 | 398 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
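
# Typical invocation once this command is registered with the `transformers-cli`
# entry point (illustrative):
#
#   transformers-cli download bert-base-uncased --cache-dir ./models --force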
| 398 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
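
# Effect of the lazy structure above (illustrative): importing the package stays cheap,
# and the heavy torch-backed submodule is only loaded on first attribute access, e.g.
#
#   from transformers import PLBartForConditionalGeneration  # triggers modeling_plbart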
| 147 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty list for both the story and the summary."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 147 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self.get_tokenizer()
lowerCAmelCase__ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase__ :str = self.get_decoder()
lowerCAmelCase__ :Optional[int] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ :Any = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowerCAmelCase__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , lowerCAmelCase__ )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
lowerCAmelCase__ :Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(lowerCAmelCase__ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=lowerCAmelCase__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = self.get_feature_extractor()
lowerCAmelCase__ :Tuple = self.get_tokenizer()
lowerCAmelCase__ :Optional[Any] = self.get_decoder()
lowerCAmelCase__ :Optional[Any] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
lowerCAmelCase__ :Optional[Any] = floats_list((3, 1_0_0_0) )
lowerCAmelCase__ :Tuple = feature_extractor(lowerCAmelCase__ , return_tensors='np' )
lowerCAmelCase__ :List[str] = processor(lowerCAmelCase__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = self.get_feature_extractor()
lowerCAmelCase__ :Any = self.get_tokenizer()
lowerCAmelCase__ :List[str] = self.get_decoder()
lowerCAmelCase__ :Optional[int] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
lowerCAmelCase__ :List[str] = "This is a test string"
lowerCAmelCase__ :int = processor(text=lowerCAmelCase__ )
lowerCAmelCase__ :Tuple = tokenizer(lowerCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.get_feature_extractor()
lowerCAmelCase__ :str = self.get_tokenizer()
lowerCAmelCase__ :Union[str, Any] = self.get_decoder()
lowerCAmelCase__ :Optional[int] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
lowerCAmelCase__ :Optional[int] = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
lowerCAmelCase__ :Union[str, Any] = processor.decode(lowerCAmelCase__ )
lowerCAmelCase__ :Tuple = decoder.decode_beams(lowerCAmelCase__ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = self.get_feature_extractor()
lowerCAmelCase__ :Optional[Any] = self.get_tokenizer()
lowerCAmelCase__ :List[str] = self.get_decoder()
lowerCAmelCase__ :Optional[Any] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
lowerCAmelCase__ :Optional[int] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowerCAmelCase__ :int = processor.batch_decode(lowerCAmelCase__ )
else:
with get_context(lowerCAmelCase__ ).Pool() as pool:
lowerCAmelCase__ :Union[str, Any] = processor.batch_decode(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase__ :Optional[int] = list(lowerCAmelCase__ )
with get_context('fork' ).Pool() as p:
lowerCAmelCase__ :List[str] = decoder.decode_beams_batch(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase__ :List[str] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCAmelCase__ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(lowerCAmelCase__ , decoded_processor.logit_score )
self.assertListEqual(lowerCAmelCase__ , decoded_processor.lm_score )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.get_feature_extractor()
lowerCAmelCase__ :List[str] = self.get_tokenizer()
lowerCAmelCase__ :Optional[int] = self.get_decoder()
lowerCAmelCase__ :str = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
lowerCAmelCase__ :Union[str, Any] = self._get_dummy_logits()
lowerCAmelCase__ :int = 1_5
lowerCAmelCase__ :Optional[int] = -2_0.0
lowerCAmelCase__ :Any = -4.0
lowerCAmelCase__ :Any = processor.batch_decode(
lowerCAmelCase__ , beam_width=lowerCAmelCase__ , beam_prune_logp=lowerCAmelCase__ , token_min_logp=lowerCAmelCase__ , )
lowerCAmelCase__ :int = decoded_processor_out.text
lowerCAmelCase__ :Any = list(lowerCAmelCase__ )
with get_context('fork' ).Pool() as pool:
lowerCAmelCase__ :Dict = decoder.decode_beams_batch(
lowerCAmelCase__ , lowerCAmelCase__ , beam_width=lowerCAmelCase__ , beam_prune_logp=lowerCAmelCase__ , token_min_logp=lowerCAmelCase__ , )
lowerCAmelCase__ :Optional[int] = [d[0][0] for d in decoded_decoder_out]
lowerCAmelCase__ :str = [d[0][2] for d in decoded_decoder_out]
lowerCAmelCase__ :Dict = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , lowerCAmelCase__ )
self.assertTrue(np.array_equal(lowerCAmelCase__ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , lowerCAmelCase__ , atol=1E-3 ) )
self.assertTrue(np.array_equal(lowerCAmelCase__ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , lowerCAmelCase__ , atol=1E-3 ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.get_feature_extractor()
lowerCAmelCase__ :Any = self.get_tokenizer()
lowerCAmelCase__ :List[str] = self.get_decoder()
lowerCAmelCase__ :Any = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
lowerCAmelCase__ :int = self._get_dummy_logits()
lowerCAmelCase__ :List[str] = 2.0
lowerCAmelCase__ :Any = 5.0
lowerCAmelCase__ :int = -2_0.0
lowerCAmelCase__ :Dict = True
lowerCAmelCase__ :Optional[int] = processor.batch_decode(
lowerCAmelCase__ , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , unk_score_offset=lowerCAmelCase__ , lm_score_boundary=lowerCAmelCase__ , )
lowerCAmelCase__ :Union[str, Any] = decoded_processor_out.text
lowerCAmelCase__ :List[str] = list(lowerCAmelCase__ )
decoder.reset_params(
alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , unk_score_offset=lowerCAmelCase__ , lm_score_boundary=lowerCAmelCase__ , )
with get_context('fork' ).Pool() as pool:
lowerCAmelCase__ :Union[str, Any] = decoder.decode_beams_batch(
lowerCAmelCase__ , lowerCAmelCase__ , )
lowerCAmelCase__ :str = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , lowerCAmelCase__ )
lowerCAmelCase__ :Any = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , lowerCAmelCase__ )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
lowerCAmelCase__ :str = processor.decoder.model_container[processor.decoder._model_key]
lowerCAmelCase__ :Tuple = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
lowerCAmelCase__ :Optional[int] = os.listdir(lowerCAmelCase__ )
lowerCAmelCase__ :Union[str, Any] = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = snapshot_download('hf-internal-testing/processor_with_lm' )
lowerCAmelCase__ :Any = WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase__ )
lowerCAmelCase__ :Dict = processor.decoder.model_container[processor.decoder._model_key]
lowerCAmelCase__ :List[Any] = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
lowerCAmelCase__ :Tuple = os.listdir(lowerCAmelCase__ )
lowerCAmelCase__ :int = os.listdir(lowerCAmelCase__ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
lowerCAmelCase__ :List[str] = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
lowerCAmelCase__ :List[Any] = floats_list((3, 1_0_0_0) )
lowerCAmelCase__ :int = processor_wavaveca(lowerCAmelCase__ , return_tensors='np' )
lowerCAmelCase__ :Union[str, Any] = processor_auto(lowerCAmelCase__ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
lowerCAmelCase__ :List[str] = self._get_dummy_logits()
lowerCAmelCase__ :List[Any] = processor_wavaveca.batch_decode(lowerCAmelCase__ )
lowerCAmelCase__ :Dict = processor_auto.batch_decode(lowerCAmelCase__ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.get_feature_extractor()
lowerCAmelCase__ :Dict = self.get_tokenizer()
lowerCAmelCase__ :List[Any] = self.get_decoder()
lowerCAmelCase__ :List[str] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
lowerCAmelCase__ :List[Any] = self._get_dummy_logits()[0]
lowerCAmelCase__ :List[Any] = processor.decode(lowerCAmelCase__ , output_word_offsets=lowerCAmelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
lowerCAmelCase__ :List[Any] = self._get_dummy_logits()
lowerCAmelCase__ :Union[str, Any] = processor.batch_decode(lowerCAmelCase__ , output_word_offsets=lowerCAmelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(lowerCAmelCase__ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def snake_case ( self ):
'''simple docstring'''
import torch
lowerCAmelCase__ :Optional[int] = load_dataset('common_voice' , 'en' , split='train' , streaming=lowerCAmelCase__ )
lowerCAmelCase__ :Tuple = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
lowerCAmelCase__ :str = iter(lowerCAmelCase__ )
lowerCAmelCase__ :Dict = next(lowerCAmelCase__ )
lowerCAmelCase__ :Dict = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
lowerCAmelCase__ :Any = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowerCAmelCase__ :List[Any] = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
lowerCAmelCase__ :int = model(lowerCAmelCase__ ).logits.cpu().numpy()
lowerCAmelCase__ :Tuple = processor.decode(logits[0] , output_word_offsets=lowerCAmelCase__ )
lowerCAmelCase__ :Tuple = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowerCAmelCase__ :List[str] = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
lowerCAmelCase__ :str = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(' '.join(self.get_from_offsets(lowerCAmelCase__ , 'word' ) ) , lowerCAmelCase__ )
self.assertEqual(' '.join(self.get_from_offsets(lowerCAmelCase__ , 'word' ) ) , output.text )
# output times
lowerCAmelCase__ :List[str] = torch.tensor(self.get_from_offsets(lowerCAmelCase__ , 'start_time' ) )
lowerCAmelCase__ :Any = torch.tensor(self.get_from_offsets(lowerCAmelCase__ , 'end_time' ) )
# fmt: off
lowerCAmelCase__ :int = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
lowerCAmelCase__ :List[Any] = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=0.01 ) )
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=0.01 ) )
| 93 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Open-addressing hash map with linear probing and automatic resizing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to add the item at the index. Return True on success, False if the bucket is taken."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        """Return True if the map is filled beyond the capacity factor."""
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        """Return True if the map occupancy dropped below half the capacity factor."""
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 653 | 0 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTYPES = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class Image:
    """Image feature: stores images as {"bytes", "path"} structs and decodes them to `PIL.Image.Image`."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value) -> dict:
        """Encode an example (str path, bytes, numpy array, PIL image, or dict) into the storage format."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode a stored {"bytes", "path"} dict into a `PIL.Image.Image`."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If decodable, keep the feature itself; otherwise expose the raw {"bytes", "path"} columns."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array of paths, bytes, structs, or nested pixel lists to the {"bytes", "path"} struct type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Read the files referenced in the "path" column and embed their bytes into the "bytes" column."""
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
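

# A minimal encode/decode round trip through the feature above. This is an
# illustrative sketch: it assumes Pillow is installed and uses an in-memory
# array rather than a real dataset column.
def _image_feature_demo() -> None:
    feature = Image()
    encoded = feature.encode_example(np.zeros((4, 4, 3), dtype=np.uint8))
    assert encoded["path"] is None and isinstance(encoded["bytes"], bytes)
    pil_image = feature.decode_example(encoded)  # -> 4x4 RGB PIL.Image.Image
    assert pil_image.size == (4, 4)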
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        # Formats that Pillow can both open and save
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Serialize a PIL image to bytes, reusing its original format when possible, else PNG or TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTYPES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTYPES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTYPES}"
        )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objs_to_list_of_image_dicts(objs: list) -> List[dict]:
    """Encode a list of homogeneous objects (paths, numpy arrays, or PIL images) into image dicts."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
return objs | 721 |
'''simple docstring'''
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed=None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return a key of length `key_len`."""
    rng = np.random.default_rng(seed=seed)
    # Each basis matches with probability 1/2, so only about half the qubits
    # survive sifting. We prepare 6x as many qubits as key bits for ample margin.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to the rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to the rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # The result of the measurement, as a bitstring.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extract the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
return key
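

# The oversampling factor of 6 gives generous headroom: with basis-match
# probability 1/2 the sifted key has expected length 3 * key_len, so the
# padding branch above is a rare fallback. A classical sketch of the sifting:
def _sifting_demo(key_len: int = 8, seed: int = 0) -> int:
    rng = np.random.default_rng(seed)
    num_qubits = 6 * key_len
    alice_basis = rng.integers(2, size=num_qubits)
    bob_basis = rng.integers(2, size=num_qubits)
    # Positions where the bases agree survive sifting, ~num_qubits / 2 on average.
    return int((alice_basis == bob_basis).sum())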
if __name__ == "__main__":
print(f"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod() | 98 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
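

# Illustration of the "*" substitution above with a hypothetical fairseq
# parameter name; the transformer layer index is recovered from the name itself.
def _wildcard_demo() -> str:
    name = "encoder.layers.7.self_attn.k_proj.weight"
    key, mapped_key = "self_attn.k_proj", MAPPING["self_attn.k_proj"]
    layer_index = name.split(key)[0].split(".")[-2]  # -> "7"
    # -> "unispeech.encoder.layers.7.attention.k_proj"
    return "unispeech." + mapped_key.replace("*", layer_index)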
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 544 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate."
    )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file."
    )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file."
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`."
    )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup."
    )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times."
    )
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False."
    )
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub."
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
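

# For illustration, with hypothetical values (tpu_name="my-tpu",
# tpu_zone="us-central1-a", a single command "echo hello", no --use_alpha and
# no --install_accelerate), the list assembled above becomes:
_EXAMPLE_CMD = [
    "gcloud", "compute", "tpus", "tpu-vm", "ssh", "my-tpu",
    "--zone", "us-central1-a",
    "--command", "cd /usr/share; echo hello",
    "--worker", "all",
]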
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
| 298 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            index = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
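

# Under the hood these tests exercise plain faiss inner-product search; stripped
# of the FaissIndex wrapper, the same behaviour looks like this (assuming the
# faiss package is installed).
def _faiss_ip_demo():
    import faiss

    index = faiss.IndexFlatIP(5)
    index.add(np.eye(5, dtype=np.float32))
    query = np.zeros((1, 5), dtype=np.float32)
    query[0, 1] = 1
    scores, indices = index.search(query, 1)  # top-1 neighbour for e_1 is row 1
    assert indices[0, 0] == 1 and scores[0, 0] == 1.0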
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 269 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase ={"""input_ids""": [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = lowerCamelCase  # alias for the expected-encoding literal defined on the long line above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e")
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
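

# `shift_tokens_right` builds decoder inputs from labels by rotating the EOS to
# the front, which is why decoder_input_ids starts with [2, FR_CODE] in the
# fairseq-parity test above. A self-contained sketch mirroring that behaviour:
def _shift_right_demo():
    import torch

    def shift_right(labels, pad_id, start_id):
        shifted = labels.new_zeros(labels.shape)
        shifted[:, 1:] = labels[:, :-1].clone()
        shifted[:, 0] = start_id
        shifted.masked_fill_(shifted == -100, pad_id)  # -100 marks ignored label slots
        return shifted

    labels = torch.tensor([[FR_CODE, 17, 23, 2]])  # [lang code, tokens..., EOS]
    assert shift_right(labels, pad_id=1, start_id=2).tolist() == [[2, FR_CODE, 17, 23]]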
| 269 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
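

# A concrete command plugs into this interface by registering a subparser and
# implementing run(). `EnvCommand` here is a hypothetical example, not the
# library's actual implementation; `parser` is the subparsers action produced
# by ArgumentParser.add_subparsers().
class EnvCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        env_parser = parser.add_parser("env", help="Print environment information.")
        env_parser.set_defaults(func=lambda args: EnvCommand())

    def run(self):
        import platform

        print(f"Python {platform.python_version()}")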
| 270 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
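

# The pattern above defers heavy framework imports until attribute access.
# Below is a stripped-down sketch of how such a lazy module can work; it is a
# simplified stand-in for transformers' _LazyModule, not its actual code.
import importlib
import types


class LazyModule(types.ModuleType):
    """Simplified lazy module: resolves exported names to submodules on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        submodule = self._name_to_module[attr]  # a real implementation raises AttributeError on misses
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so subsequent lookups skip __getattr__
        return value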
| 270 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(SAMPLE_CONFIG, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
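

# Outside of tests, the same registration hook routes a custom config class to
# a custom feature extractor. Hedged sketch only: `MyConfig` and
# `MyFeatureExtractor` are hypothetical user classes, and whether
# from_pretrained resolves a custom class this way depends on the saved
# feature_extractor_type, as exercised in the tests above.
from transformers import PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyFeatureExtractor(FeatureExtractionMixin):
    pass


def _registration_demo(tmp_dir: str) -> None:
    AutoConfig.register("my-model", MyConfig)
    AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
    extractor = MyFeatureExtractor()
    extractor.save_pretrained(tmp_dir)
    reloaded = AutoFeatureExtractor.from_pretrained(tmp_dir)
    assert isinstance(reloaded, MyFeatureExtractor)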
| 365 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def _UpperCamelCase ( UpperCamelCase_ : Dict , UpperCamelCase_ : Dict ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ = {
'word_embeddings.weight': 'word_embeddings.weight',
'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
'weight': 'ln_f.weight',
'bias': 'ln_f.bias',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
lowerCAmelCase__ = int(re.match(r'.*layer_(\d*).*' , UpperCamelCase_ )[1] )
layer_number -= 3
return F"h.{layer_number}." + key
def _UpperCamelCase ( UpperCamelCase_ : Any ) -> Optional[Any]:
"""simple docstring"""
if dtype == torch.bool:
return 1 / 8
lowerCAmelCase__ = re.search(r'[^\d](\d+)$' , str(UpperCamelCase_ ) )
if bit_search is None:
raise ValueError(F"`dtype` is not a valid dtype: {dtype}." )
lowerCAmelCase__ = int(bit_search.groups()[0] )
return bit_size // 8
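# Illustrative values for the helper above: str(torch.float32) ends in "32",
# so the regex captures 32 and the function returns 4 bytes per element;
# torch.int8 -> 1 byte; torch.bool is special-cased above to 1/8 (one bit).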
def _UpperCamelCase ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if bloom_config_file == "":
lowerCAmelCase__ = BloomConfig()
else:
lowerCAmelCase__ = BloomConfig.from_json_file(UpperCamelCase_ )
if shard_model:
lowerCAmelCase__ = os.listdir(UpperCamelCase_ )
lowerCAmelCase__ = sorted(filter(lambda UpperCamelCase_ : s.startswith('layer' ) and "model_00" in s , UpperCamelCase_ ) )
lowerCAmelCase__ = {'weight_map': {}, 'metadata': {}}
lowerCAmelCase__ = 0
lowerCAmelCase__ = None
lowerCAmelCase__ = BloomConfig()
for j, file in enumerate(UpperCamelCase_ ):
print('Processing file: {}'.format(UpperCamelCase_ ) )
lowerCAmelCase__ = None
for i in range(UpperCamelCase_ ):
# load all TP files
lowerCAmelCase__ = file.replace('model_00' , F"model_0{i}" )
lowerCAmelCase__ = torch.load(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , map_location='cpu' )
# Rename keys in the transformers names
lowerCAmelCase__ = list(temp.keys() )
for key in keys:
lowerCAmelCase__ = temp.pop(UpperCamelCase_ )
if tensors is None:
lowerCAmelCase__ = temp
else:
for key in tensors.keys():
if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase__ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
lowerCAmelCase__ = torch.cat([tensors[key], temp[key]] , dim=UpperCamelCase_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase__ = tensors[key] / pretraining_tp
torch.save(
UpperCamelCase_ , os.path.join(
UpperCamelCase_ , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(UpperCamelCase_ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
lowerCAmelCase__ = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
lowerCAmelCase__ = 'pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) , str(len(UpperCamelCase_ ) ).zfill(5 ) )
lowerCAmelCase__ = BloomConfig()
lowerCAmelCase__ = pytorch_dump_folder_path + '/' + CONFIG_NAME
lowerCAmelCase__ = total_size
with open(UpperCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(UpperCamelCase_ , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
lowerCAmelCase__ = json.dumps(UpperCamelCase_ , indent=2 , sort_keys=UpperCamelCase_ ) + '\n'
f.write(UpperCamelCase_ )
else:
lowerCAmelCase__ = BloomModel(UpperCamelCase_ )
lowerCAmelCase__ = os.listdir(UpperCamelCase_ )
lowerCAmelCase__ = sorted(filter(lambda UpperCamelCase_ : s.startswith('layer' ) and "model_00" in s , UpperCamelCase_ ) )
lowerCAmelCase__ = None
for _, file in enumerate(UpperCamelCase_ ):
lowerCAmelCase__ = None
for i in range(UpperCamelCase_ ):
# load all TP files
lowerCAmelCase__ = file.replace('model_00' , F"model_0{i}" )
lowerCAmelCase__ = torch.load(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , map_location='cpu' )
# Rename keys in the transformers names
lowerCAmelCase__ = list(temp.keys() )
for key in keys:
lowerCAmelCase__ = temp.pop(UpperCamelCase_ )
if tensors is None:
lowerCAmelCase__ = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase__ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
lowerCAmelCase__ = torch.cat([tensors[key], temp[key]] , dim=UpperCamelCase_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase__ = tensors[key] / pretraining_tp
lowerCAmelCase__ = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
assert not other_keys.unexpected_keys, F"The keys {other_keys.unexpected_keys} are unexpected"
if missing_keys is None:
lowerCAmelCase__ = set(other_keys.missing_keys )
else:
lowerCAmelCase__ = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F"The keys {missing_keys} are missing"
# Save pytorch-model
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
lowerCAmelCase__ = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
lowerCAmelCase__ = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
if config.torch_dtype is not None:
lowerCAmelCase__ = model.to(config.torch_dtype )
torch.save(model.state_dict() , UpperCamelCase_ )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(UpperCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
__snake_case : str = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
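# A minimal sketch (added for illustration; tensor names and shapes are
# hypothetical) of the two tensor-parallel merge rules applied in the loops
# above: layer norms and biases are averaged across TP ranks, while all other
# weights are concatenated along their parallel dimension.
#
# import torch
# rank_shards = [{"input_layernorm.weight": torch.ones(4)} for _ in range(2)]
# averaged = sum(s["input_layernorm.weight"] for s in rank_shards) / len(rank_shards)
# concatenated = torch.cat([torch.ones(4, 2), torch.ones(4, 2)], dim=1)  # shape (4, 4)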
| 365 | 1 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Dict = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Dict = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Any = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Optional[int] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : str = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : str = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
_lowerCamelCase : int = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_lowerCamelCase : List[Any] = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
_lowerCamelCase : List[str] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Dict = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
_lowerCamelCase : int = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Tuple = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
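# Rule these tests encode, sketched informally (inferred from the cases above,
# not from the library implementation): a checkpoint is safetensors-compatible
# when every `.bin` (or `.fp16.bin`) weight file has a matching `.safetensors`
# counterpart for the requested variant; a single missing counterpart (the
# "Removed:" lines) makes the whole checkpoint incompatible.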
| 83 |
"""simple docstring"""
from torch import nn
def snake_case_ ( A_ : int ):
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'''Unsupported activation function: {act_fn}''' )
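# A tiny smoke test of the helper above (an illustrative addition, not part
# of the original module).
print(snake_case_("silu")) # prints SiLU()
print(snake_case_("gelu")) # prints GELU()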
| 83 | 1 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 720 |
import random
def lowercase_ (A : int ):
snake_case__ : List[str] = num - 1
snake_case__ : Union[str, Any] = 0
while s % 2 == 0:
snake_case__ : Any = s // 2
t += 1
for _ in range(5 ):
snake_case__ : List[Any] = random.randrange(2 , num - 1 )
snake_case__ : Tuple = pow(A , A , A )
if v != 1:
snake_case__ : str = 0
while v != (num - 1):
if i == t - 1:
return False
else:
snake_case__ : Tuple = i + 1
snake_case__ : Optional[int] = (v**2) % num
return True
def lowercase_ (A : int ):
if num < 2:
return False
snake_case__ : Dict = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(A )
def lowercase_ (A : int = 1_0_2_4 ):
while True:
snake_case__ : List[str] = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(A ):
return num
if __name__ == "__main__":
a_ :Any = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 243 | 0 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
UpperCAmelCase__ = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
UpperCAmelCase__ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def _UpperCAmelCase ( __lowerCamelCase : str ) -> str:
if "://" in dataset_path:
_snake_case = dataset_path.split('''://''' )[1]
return dataset_path
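# Intended behaviour of the helper above, illustrated with hypothetical paths:
# a URI such as "s3://bucket/dataset" is reduced to "bucket/dataset", while a
# plain local path like "data/train" falls through the `if` and is returned
# unchanged.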
def _UpperCAmelCase ( __lowerCamelCase : fsspec.AbstractFileSystem ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def _UpperCAmelCase ( __lowerCamelCase : fsspec.AbstractFileSystem , __lowerCamelCase : str , __lowerCamelCase : str ) -> Union[str, Any]:
_snake_case = not is_remote_filesystem(__lowerCamelCase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(__lowerCamelCase ) , fs._strip_protocol(__lowerCamelCase ) )
else:
fs.mv(__lowerCamelCase , __lowerCamelCase , recursive=__lowerCamelCase )
def _UpperCAmelCase ( ) -> None:
if hasattr(fsspec.asyn , '''reset_lock''' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_snake_case = None
_snake_case = None
_snake_case = threading.Lock()
| 224 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any]="ro" , __lowerCamelCase : Optional[Any]="en" , __lowerCamelCase : Optional[int]="wmt16" , __lowerCamelCase : Tuple=None ) -> None:
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
_snake_case = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
_snake_case = datasets.load_dataset(__lowerCamelCase , __lowerCamelCase )
if save_dir is None:
_snake_case = f'''{dataset}-{pair}'''
_snake_case = Path(__lowerCamelCase )
save_dir.mkdir(exist_ok=__lowerCamelCase )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
_snake_case = '''val''' if split == '''validation''' else split
_snake_case = save_dir.joinpath(f'''{fn}.source''' )
_snake_case = save_dir.joinpath(f'''{fn}.target''' )
_snake_case = src_path.open('''w+''' )
_snake_case = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
_snake_case = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
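# Example invocation via python-fire (the script filename is hypothetical):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
# which writes {train,val,test}.source and {train,val,test}.target under save_dir.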
| 224 | 1 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class snake_case (UpperCamelCase ):
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ) -> List[str]:
super().__init__(*UpperCAmelCase_ ,**UpperCAmelCase_ )
lowercase__ = {}
def _a ( self ,UpperCAmelCase_ ,*UpperCAmelCase_ ,**UpperCAmelCase_ ) -> str:
lowercase__ = super().add_tokens(UpperCAmelCase_ ,*UpperCAmelCase_ ,**UpperCAmelCase_ )
if num_added_tokens == 0:
raise ValueError(
F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
" `placeholder_token` that is not already in the tokenizer." )
def _a ( self ,UpperCAmelCase_ ,*UpperCAmelCase_ ,UpperCAmelCase_=1 ,**UpperCAmelCase_ ) -> List[Any]:
lowercase__ = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCAmelCase_ ,*UpperCAmelCase_ ,**UpperCAmelCase_ )
output.append(UpperCAmelCase_ )
else:
lowercase__ = []
for i in range(UpperCAmelCase_ ):
lowercase__ = placeholder_token + F'''_{i}'''
self.try_adding_tokens(UpperCAmelCase_ ,*UpperCAmelCase_ ,**UpperCAmelCase_ )
output.append(UpperCAmelCase_ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'''The tokenizer already has placeholder token {token} that can get confused with'''
F''' {placeholder_token}; keep placeholder tokens independent''' )
lowercase__ = output
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_=False ,UpperCAmelCase_=1.0 ) -> Optional[int]:
if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
lowercase__ = []
for i in range(len(UpperCAmelCase_ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] ,vector_shuffle=UpperCAmelCase_ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowercase__ = self.token_map[placeholder_token]
lowercase__ = tokens[: 1 + int(len(UpperCAmelCase_ ) * prop_tokens_to_load )]
if vector_shuffle:
lowercase__ = copy.copy(UpperCAmelCase_ )
random.shuffle(UpperCAmelCase_ )
lowercase__ = text.replace(UpperCAmelCase_ ," ".join(UpperCAmelCase_ ) )
return text
def __call__( self ,UpperCAmelCase_ ,*UpperCAmelCase_ ,UpperCAmelCase_=False ,UpperCAmelCase_=1.0 ,**UpperCAmelCase_ ) -> int:
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ ,vector_shuffle=UpperCAmelCase_ ,prop_tokens_to_load=UpperCAmelCase_ ) ,*UpperCAmelCase_ ,**UpperCAmelCase_ ,)
def _a ( self ,UpperCAmelCase_ ,*UpperCAmelCase_ ,UpperCAmelCase_=False ,UpperCAmelCase_=1.0 ,**UpperCAmelCase_ ) -> Optional[int]:
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ ,vector_shuffle=UpperCAmelCase_ ,prop_tokens_to_load=UpperCAmelCase_ ) ,*UpperCAmelCase_ ,**UpperCAmelCase_ ,)
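# Intended flow, sketched as a note (token names are illustrative assumptions):
# registering a placeholder such as "<cat>" with num_vec_per_token=3 adds
# "<cat>_0", "<cat>_1", "<cat>_2" to the vocabulary and maps "<cat>" to them,
# so any prompt containing "<cat>" is expanded (optionally shuffled) before
# the regular CLIP tokenization runs.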
| 539 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case (UpperCamelCase , unittest.TestCase ):
lowerCAmelCase__ :Optional[int] = CodeGenTokenizer
lowerCAmelCase__ :List[Any] = CodeGenTokenizerFast
lowerCAmelCase__ :str = True
lowerCAmelCase__ :Tuple = {"add_prefix_space": True}
lowerCAmelCase__ :Dict = False
def _a ( self ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
lowercase__ = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
lowercase__ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ = {"unk_token": "<unk>"}
lowercase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCAmelCase_ ) )
def _a ( self ,**UpperCAmelCase_ ) -> int:
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def _a ( self ,**UpperCAmelCase_ ) -> Tuple:
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def _a ( self ,UpperCAmelCase_ ) -> List[Any]:
lowercase__ = "lower newer"
lowercase__ = "lower newer"
return input_text, output_text
def _a ( self ) -> Optional[int]:
lowercase__ = CodeGenTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
lowercase__ = "lower newer"
lowercase__ = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
lowercase__ = tokenizer.tokenize(UpperCAmelCase_ ,add_prefix_space=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
lowercase__ = tokens + [tokenizer.unk_token]
lowercase__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,UpperCAmelCase_ )
def _a ( self ) -> int:
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer(add_prefix_space=UpperCAmelCase_ )
lowercase__ = "lower newer"
# Testing tokenization
lowercase__ = tokenizer.tokenize(UpperCAmelCase_ ,add_prefix_space=UpperCAmelCase_ )
lowercase__ = rust_tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
# Testing conversion to ids without special tokens
lowercase__ = tokenizer.encode(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,add_prefix_space=UpperCAmelCase_ )
lowercase__ = rust_tokenizer.encode(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
# Testing conversion to ids with special tokens
lowercase__ = self.get_rust_tokenizer(add_prefix_space=UpperCAmelCase_ )
lowercase__ = tokenizer.encode(UpperCAmelCase_ ,add_prefix_space=UpperCAmelCase_ )
lowercase__ = rust_tokenizer.encode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
# Testing the unknown token
lowercase__ = tokens + [rust_tokenizer.unk_token]
lowercase__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,UpperCAmelCase_ )
def _a ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ) -> Any:
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def _a ( self ,UpperCAmelCase_=15 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
# Simple input
lowercase__ = "This is a simple input"
lowercase__ = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ = ("This is a simple input", "This is a pair")
lowercase__ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(UpperCAmelCase_ ,tokenizer_r.encode ,UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,padding="max_length" )
# Simple input
self.assertRaises(UpperCAmelCase_ ,tokenizer_r.encode_plus ,UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,padding="max_length" )
# Simple input
self.assertRaises(
UpperCAmelCase_ ,tokenizer_r.batch_encode_plus ,UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,padding="max_length" ,)
# Pair input
self.assertRaises(UpperCAmelCase_ ,tokenizer_r.encode ,UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,padding="max_length" )
# Pair input
self.assertRaises(UpperCAmelCase_ ,tokenizer_r.encode_plus ,UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,padding="max_length" )
# Pair input
self.assertRaises(
UpperCAmelCase_ ,tokenizer_r.batch_encode_plus ,UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,padding="max_length" ,)
def _a ( self ) -> Optional[Any]:
lowercase__ = CodeGenTokenizer.from_pretrained(self.tmpdirname ,pad_token="<pad>" )
# Simple input
lowercase__ = "This is a simple input"
lowercase__ = ["This is a simple input looooooooong", "This is a simple input"]
lowercase__ = ("This is a simple input", "This is a pair")
lowercase__ = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowercase__ = tokenizer.pad_token_id
lowercase__ = tokenizer(UpperCAmelCase_ ,padding="max_length" ,max_length=30 ,return_tensors="np" )
lowercase__ = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncate=UpperCAmelCase_ ,return_tensors="np" )
lowercase__ = tokenizer(*UpperCAmelCase_ ,padding="max_length" ,max_length=60 ,return_tensors="np" )
lowercase__ = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncate=UpperCAmelCase_ ,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def _a ( self ) -> List[str]:
lowercase__ = "$$$"
lowercase__ = CodeGenTokenizer.from_pretrained(self.tmpdirname ,bos_token=UpperCAmelCase_ ,add_bos_token=UpperCAmelCase_ )
lowercase__ = "This is a simple input"
lowercase__ = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ = tokenizer.bos_token_id
lowercase__ = tokenizer(UpperCAmelCase_ )
lowercase__ = tokenizer(UpperCAmelCase_ )
self.assertEqual(out_s.input_ids[0] ,UpperCAmelCase_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowercase__ = tokenizer.decode(out_s.input_ids )
lowercase__ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,UpperCAmelCase_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def _a ( self ) -> List[Any]:
lowercase__ = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
lowercase__ = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
lowercase__ = "\nif len_a > len_b: result = a\nelse: result = b"
lowercase__ = tokenizer.encode(UpperCAmelCase_ )
lowercase__ = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
lowercase__ = tokenizer.decode(UpperCAmelCase_ ,truncate_before_pattern=UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
def _a ( self ) -> Any:
pass
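# Note on the slow test above: CodeGen's decode(..., truncate_before_pattern=...)
# cuts the decoded text at the first match of any of the given regexes
# (comment markers, docstring openers, runs of blank lines), which is how the
# trailing "\n\n\n\n#" of the input is stripped in the expected output.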
| 539 | 1 |
"""simple docstring"""
from collections import deque
class __snake_case :
def __init__( self : int , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = process_name # process name
_lowerCamelCase : List[Any] = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
_lowerCamelCase : Optional[int] = arrival_time
_lowerCamelCase : int = burst_time # remaining burst time
_lowerCamelCase : Dict = 0 # total time of the process wait in ready queue
_lowerCamelCase : Tuple = 0 # time from arrival time to completion time
class __snake_case :
def __init__( self : int , __lowerCAmelCase : int , __lowerCAmelCase : list[int] , __lowerCAmelCase : deque[Process] , __lowerCAmelCase : int , ):
"""simple docstring"""
_lowerCamelCase : Any = number_of_queues
# time slice of queues that round robin algorithm applied
_lowerCamelCase : Any = time_slices
# unfinished process is in this ready_queue
_lowerCamelCase : Dict = queue
# current time
_lowerCamelCase : List[Any] = current_time
# finished process is in this sequence queue
_lowerCamelCase : deque[Process] = deque()
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : list[Process] ):
"""simple docstring"""
_lowerCamelCase : int = []
for i in range(len(__lowerCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : list[Process] ):
"""simple docstring"""
_lowerCamelCase : str = []
for i in range(len(__lowerCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : list[Process] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = []
for i in range(len(__lowerCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : deque[Process] ):
"""simple docstring"""
return [q.burst_time for q in queue]
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Process ):
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : deque[Process] ):
"""simple docstring"""
_lowerCamelCase : deque[Process] = deque() # sequence deque of finished process
while len(__lowerCAmelCase ) != 0:
_lowerCamelCase : str = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(__lowerCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
_lowerCamelCase : List[Any] = 0
# set the process's turnaround time because it is finished
_lowerCamelCase : Tuple = self.current_time - cp.arrival_time
# set the completion time
_lowerCamelCase : Optional[Any] = self.current_time
# add the process to queue that has finished queue
finished.append(__lowerCAmelCase )
self.finish_queue.extend(__lowerCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : deque[Process] , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(__lowerCAmelCase ) ):
_lowerCamelCase : Dict = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(__lowerCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
_lowerCamelCase : str = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(__lowerCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
_lowerCamelCase : int = 0
# set the finish time
_lowerCamelCase : Optional[int] = self.current_time
# update the process' turnaround time because it is finished
_lowerCamelCase : int = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(__lowerCAmelCase )
self.finish_queue.extend(__lowerCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
for i in range(self.number_of_queues - 1 ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase__ = Process('''P1''', 0, 53)
lowerCAmelCase__ = Process('''P2''', 0, 17)
lowerCAmelCase__ = Process('''P3''', 0, 68)
lowerCAmelCase__ = Process('''P4''', 0, 24)
lowerCAmelCase__ = 3
lowerCAmelCase__ = [17, 25]
lowerCAmelCase__ = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase__ = Process('''P1''', 0, 53)
lowerCAmelCase__ = Process('''P2''', 0, 17)
lowerCAmelCase__ = Process('''P3''', 0, 68)
lowerCAmelCase__ = Process('''P4''', 0, 24)
lowerCAmelCase__ = 3
lowerCAmelCase__ = [17, 25]
lowerCAmelCase__ = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase__ = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase__ = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
F"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
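# Scheduling note (added summary): with number_of_queues = 3 and time_slices =
# [17, 25], each process first receives a 17-tick round-robin slice, survivors
# receive a 25-tick slice, and the final queue drains whatever remains with
# first-come-first-served. P2 (burst 17) therefore completes in the first
# queue, while the others carry their remainders forward.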
| 83 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __snake_case ( _lowercase):
snake_case__ : List[str] = "cvt"
def __init__( self : Any , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : List[str]=[7, 3, 3] , __lowerCAmelCase : int=[4, 2, 2] , __lowerCAmelCase : int=[2, 1, 1] , __lowerCAmelCase : str=[6_4, 1_9_2, 3_8_4] , __lowerCAmelCase : Dict=[1, 3, 6] , __lowerCAmelCase : Optional[Any]=[1, 2, 1_0] , __lowerCAmelCase : Dict=[4.0, 4.0, 4.0] , __lowerCAmelCase : Dict=[0.0, 0.0, 0.0] , __lowerCAmelCase : Union[str, Any]=[0.0, 0.0, 0.0] , __lowerCAmelCase : int=[0.0, 0.0, 0.1] , __lowerCAmelCase : Union[str, Any]=[True, True, True] , __lowerCAmelCase : str=[False, False, True] , __lowerCAmelCase : List[str]=["dw_bn", "dw_bn", "dw_bn"] , __lowerCAmelCase : List[Any]=[3, 3, 3] , __lowerCAmelCase : Dict=[1, 1, 1] , __lowerCAmelCase : str=[2, 2, 2] , __lowerCAmelCase : Optional[Any]=[1, 1, 1] , __lowerCAmelCase : Optional[Any]=[1, 1, 1] , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : Any=1E-12 , **__lowerCAmelCase : int , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
_lowerCamelCase : Optional[int] = num_channels
_lowerCamelCase : int = patch_sizes
_lowerCamelCase : Optional[Any] = patch_stride
_lowerCamelCase : str = patch_padding
_lowerCamelCase : Any = embed_dim
_lowerCamelCase : Optional[Any] = num_heads
_lowerCamelCase : Dict = depth
_lowerCamelCase : Optional[int] = mlp_ratio
_lowerCamelCase : Any = attention_drop_rate
_lowerCamelCase : Any = drop_rate
_lowerCamelCase : Dict = drop_path_rate
_lowerCamelCase : Optional[int] = qkv_bias
_lowerCamelCase : int = cls_token
_lowerCamelCase : int = qkv_projection_method
_lowerCamelCase : Optional[Any] = kernel_qkv
_lowerCamelCase : List[str] = padding_kv
_lowerCamelCase : Tuple = stride_kv
_lowerCamelCase : Union[str, Any] = padding_q
_lowerCamelCase : Optional[Any] = stride_q
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : Optional[int] = layer_norm_eps
| 83 | 1 |
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
UpperCamelCase__: List[Any] = "path-to-your-trained-model"
UpperCamelCase__: Tuple = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
UpperCamelCase__: Dict = "A photo of sks dog in a bucket"
UpperCamelCase__: int = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 528 |
'''simple docstring'''
UpperCamelCase__: dict[tuple[int, int, int], int] = {}
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
UpperCAmelCase : List[Any] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
UpperCAmelCase : int = _calculate(days - 1 , _lowerCAmelCase , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
UpperCAmelCase : Optional[Any] = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
UpperCAmelCase : Tuple = _calculate(days - 1 , _lowerCAmelCase , 0 )
UpperCAmelCase : str = state_late + state_absent + state_ontime
UpperCAmelCase : List[Any] = prizestrings
return prizestrings
def snake_case_ ( _lowerCAmelCase : int = 30 ) -> int:
return _calculate(_lowerCAmelCase , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
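# Small hand-checkable values of the recurrence (strings over {O, L, A} with
# at most one absence overall and no three consecutive lates):
# solution(1) == 3, solution(2) == 8, solution(3) == 19, solution(4) == 43.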
| 528 | 1 |
from scipy.stats import spearmanr
import datasets
lowerCAmelCase_ = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
lowerCAmelCase_ = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
lowerCAmelCase_ = R"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A (datasets.Metric ):
def __a ( self ) -> Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
def __a ( self , lowercase_ , lowercase_ , lowercase_=False ) -> List[str]:
'''simple docstring'''
_snake_case : Optional[int] = spearmanr(lowercase_ , lowercase_ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 326 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class A :
_SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" ,metadata={"""help""": """Model name or path of model to be trained."""} )
_SCREAMING_SNAKE_CASE = field(
default="""./""" ,metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
_SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-train""" ,metadata={"""help""": """Name or path of training dataset."""} )
_SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-valid""" ,metadata={"""help""": """Name or path of validation dataset."""} )
_SCREAMING_SNAKE_CASE = field(default=2 ,metadata={"""help""": """Batch size for training."""} )
_SCREAMING_SNAKE_CASE = field(default=2 ,metadata={"""help""": """Batch size for evaluation."""} )
_SCREAMING_SNAKE_CASE = field(default=0.1 ,metadata={"""help""": """Value of weight decay."""} )
_SCREAMING_SNAKE_CASE = field(
default=10_000 ,metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
_SCREAMING_SNAKE_CASE = field(default=2E-4 ,metadata={"""help""": """Learning rate for training."""} )
_SCREAMING_SNAKE_CASE = field(default="""cosine""" ,metadata={"""help""": """Learning rate."""} )
_SCREAMING_SNAKE_CASE = field(
default=750 ,metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
_SCREAMING_SNAKE_CASE = field(
default=16 ,metadata={"""help""": """Number of gradient accumulation steps."""} )
_SCREAMING_SNAKE_CASE = field(
default=__UpperCAmelCase ,metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
_SCREAMING_SNAKE_CASE = field(default=50_000 ,metadata={"""help""": """Maximum number of training steps."""} )
_SCREAMING_SNAKE_CASE = field(
default=-1 ,metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
_SCREAMING_SNAKE_CASE = field(default=1_024 ,metadata={"""help""": """Sequence lengths used for training."""} )
_SCREAMING_SNAKE_CASE = field(default=1 ,metadata={"""help""": """Training seed."""} )
_SCREAMING_SNAKE_CASE = field(
default=1_024 ,metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} ,)
_SCREAMING_SNAKE_CASE = field(
default=__UpperCAmelCase ,metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
_SCREAMING_SNAKE_CASE = field(default=__UpperCAmelCase ,metadata={"""help""": """If True the data is pretokenized."""} )
@dataclass
class A :
_SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" ,metadata={"""help""": """Model name or path of model to be evaluated."""} )
_SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-valid""" ,metadata={"""help""": """Name or path of validation dataset."""} )
_SCREAMING_SNAKE_CASE = field(default=2 ,metadata={"""help""": """Batch size used for evaluation."""} )
_SCREAMING_SNAKE_CASE = field(
default=-1 ,metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
_SCREAMING_SNAKE_CASE = field(default=1_024 ,metadata={"""help""": """Length of sequences to be evaluated."""} )
_SCREAMING_SNAKE_CASE = field(default=1 ,metadata={"""help""": """Random seed used for evaluation."""} )
@dataclass
class A :
_SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" ,metadata={"""help""": """Model name or path of model to be evaluated."""} )
_SCREAMING_SNAKE_CASE = field(default=__UpperCAmelCase ,metadata={"""help""": """Number of workers used for code evaluation."""} )
_SCREAMING_SNAKE_CASE = field(
default=__UpperCAmelCase ,metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} ,)
_SCREAMING_SNAKE_CASE = field(
default=__UpperCAmelCase ,metadata={"""help""": """Sample from the language model's output distribution."""} )
_SCREAMING_SNAKE_CASE = field(default=0.2 ,metadata={"""help""": """Sampling temperature used for generation."""} )
_SCREAMING_SNAKE_CASE = field(default=256 ,metadata={"""help""": """Maximum number of newly generated tokens."""} )
_SCREAMING_SNAKE_CASE = field(default=0 ,metadata={"""help""": """Top-k parameter used for generation."""} )
_SCREAMING_SNAKE_CASE = field(default=0.9_5 ,metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
_SCREAMING_SNAKE_CASE = field(default=10 ,metadata={"""help""": """Number of generations to run in parallel."""} )
_SCREAMING_SNAKE_CASE = field(
default=200 ,metadata={"""help""": """Number of completions to generate for each sample."""} )
_SCREAMING_SNAKE_CASE = field(default=1 ,metadata={"""help""": """Random seed used for evaluation."""} )
_SCREAMING_SNAKE_CASE = field(
default="""eval_results.json""" ,metadata={"""help""": """Random seed used for evaluation."""} )
_SCREAMING_SNAKE_CASE = field(
default="""0""" ,metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
_SCREAMING_SNAKE_CASE = field(
default=-1 ,metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} ,)
@dataclass
class A :
_SCREAMING_SNAKE_CASE = field(
default=__UpperCAmelCase ,metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} ,)
_SCREAMING_SNAKE_CASE = field(
default="""transformersbook/codeparrot""" ,metadata={"""help""": """Folder or name of dataset to process."""} )
_SCREAMING_SNAKE_CASE = field(
default="""codeparrot-clean""" ,metadata={"""help""": """Folder to save processed processed dataset."""} )
_SCREAMING_SNAKE_CASE = field(
default=100_000 ,metadata={"""help""": """Number of files to save per JSON output file."""} )
_SCREAMING_SNAKE_CASE = field(default="""content""" ,metadata={"""help""": """Column containing text data to process."""} )
_SCREAMING_SNAKE_CASE = field(
default=1_000 ,metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
_SCREAMING_SNAKE_CASE = field(
default=100 ,metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
_SCREAMING_SNAKE_CASE = field(
default=0.2_5 ,metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
_SCREAMING_SNAKE_CASE = field(
default=1.5 ,metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
_SCREAMING_SNAKE_CASE = field(
default=0.7 ,metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
_SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" ,metadata={"""help""": """Name or path to the tokenizer."""} ,)
_SCREAMING_SNAKE_CASE = field(
default=__UpperCAmelCase ,metadata={"""help""": """If True, near-duplicate samples are removed."""} )
_SCREAMING_SNAKE_CASE = field(
default=0.8_5 ,metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )
@dataclass
class A :
_SCREAMING_SNAKE_CASE = field(
default="""gpt2""" ,metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
_SCREAMING_SNAKE_CASE = field(
default="""transformersbook/codeparrot-train""" ,metadata={"""help""": """Dataset to train tokenizer on."""} )
_SCREAMING_SNAKE_CASE = field(default="""content""" ,metadata={"""help""": """Column containing text data to process."""} )
_SCREAMING_SNAKE_CASE = field(default=200_000 ,metadata={"""help""": """Number of examples to train tokenizer on."""} )
_SCREAMING_SNAKE_CASE = field(
default=32_768 ,metadata={"""help""": """Vocabulary size of the new tokenizer."""} )
_SCREAMING_SNAKE_CASE = field(default="""codeparrot""" ,metadata={"""help""": """Name of new tokenizer."""} )
_SCREAMING_SNAKE_CASE = field(default=__UpperCAmelCase ,metadata={"""help""": """Push saved tokenizer to the hub."""} )
@dataclass
class A :
_SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" ,metadata={"""help""": """Name or path to the tokenizer."""} )
_SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-train""" ,metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
_SCREAMING_SNAKE_CASE = field(
default="""tokenized-codeparrot-train""" ,metadata={"""help""": """Repo name of the pretokenized data."""} )
_SCREAMING_SNAKE_CASE = field(default=__UpperCAmelCase ,metadata={"""help""": """Number of workers used for code evaluation."""} )
@dataclass
class A :
_SCREAMING_SNAKE_CASE = field(
default="""gpt2-large""" ,metadata={"""help""": """Configuration to use for model initialization."""} )
_SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" ,metadata={"""help""": """Tokenizer attached to model."""} )
_SCREAMING_SNAKE_CASE = field(default="""codeparrot""" ,metadata={"""help""": """Name of the created model."""} )
_SCREAMING_SNAKE_CASE = field(default=__UpperCAmelCase ,metadata={"""help""": """Push saved tokenizer to the hub."""} )
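# Usage note (a sketch; the dataclass names here are obfuscated, so the call
# below is illustrative rather than runnable as-is): these argument groups are
# meant to be consumed with transformers.HfArgumentParser, e.g.
#   parser = HfArgumentParser(TrainingArguments)
#   args = parser.parse_args_into_dataclasses()[0]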
| 326 | 1 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=6 , __UpperCAmelCase=17 , __UpperCAmelCase=23 , __UpperCAmelCase=11 , __UpperCAmelCase=True , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = act_dim
__lowerCamelCase = state_dim
__lowerCamelCase = hidden_size
__lowerCamelCase = max_length
__lowerCamelCase = is_training
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
__lowerCamelCase = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
__lowerCamelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
__lowerCamelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
__lowerCamelCase = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
__lowerCamelCase = random_attention_mask((self.batch_size, self.seq_length) )
__lowerCamelCase = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False
    # Ignoring failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]], device=torch_device
        )
        target_return = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)
        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                _, action_pred, _ = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=target_return,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = target_return[0, -1] - reward
            target_return = torch.cat([target_return, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
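# Hedged helper sketch (not part of the test suite): the loop above is the usual
# autoregressive rollout recipe for a trained Decision Transformer. Factoring one
# step out looks roughly like this; only the model's forward signature is the
# real API, everything else is illustrative.
def _predict_next_action(model, states, actions, rewards, returns_to_go, timesteps):
    act_dim = actions.shape[-1]
    # Pad the action/reward sequences with a placeholder for the step being predicted.
    actions = torch.cat([actions, torch.zeros(1, 1, act_dim, device=actions.device)], dim=1)
    rewards = torch.cat([rewards, torch.zeros(1, 1, device=rewards.device)], dim=1)
    attention_mask = torch.ones(1, states.shape[1], dtype=torch.long, device=states.device)
    with torch.no_grad():
        _, action_pred, _ = model(
            states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go,
            timesteps=timesteps, attention_mask=attention_mask, return_dict=False,
        )
    return action_pred[0, -1]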
| 622 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)
    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)
    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 622 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
SCREAMING_SNAKE_CASE__ : Tuple = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
SCREAMING_SNAKE_CASE__ : Any = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
SCREAMING_SNAKE_CASE__ : List[Any] = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
    def _compute(
        self, predictions, references, normalized=False, ignore_punct=False, support_zh_ja_chars=False, case_sensitive=False
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GitConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output | 293 | 0 |
import unittest
from knapsack import knapsack as k
class TestClass(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
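# For orientation, a hedged sketch of the function under test: a classic
# recursive 0/1 knapsack with the (capacity, weights, values, counter) signature
# used by the calls above. The real `knapsack.knapsack` may differ in details.
def _knapsack_sketch(cap, w, val, c):
    if c == 0 or cap == 0:
        return 0
    if w[c - 1] > cap:
        # Item doesn't fit; skip it.
        return _knapsack_sketch(cap, w, val, c - 1)
    return max(
        val[c - 1] + _knapsack_sketch(cap - w[c - 1], w, val, c - 1),
        _knapsack_sketch(cap, w, val, c - 1),
    )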
if __name__ == "__main__":
unittest.main()
| 693 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
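# The module above defers heavy imports until first attribute access. A minimal,
# self-contained illustration of the same idea (hypothetical `_LazyProxy`, not
# transformers' actual _LazyModule):
import importlib


class _LazyProxy:
    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, attr):
        # The import happens only on the first attribute lookup.
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, attr)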
| 693 | 1 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)
        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)
        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
| 477 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params
    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
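# Hedged usage sketch: this class is what backs pipeline("feature-extraction");
# the checkpoint below is illustrative.
#
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test")  # nested list: [batch][token][hidden_dim]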
| 477 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58_101, decoder_vocab_size=None, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=1024, dropout=0.1, activation_dropout=0.0,
        attention_dropout=0.0, init_std=0.02, decoder_start_token_id=58_100, scale_embedding=False,
        pad_token_id=58_100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ):
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, encoder_seq_length = common_inputs["input_ids"].shape
                decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
                num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
                encoder_shape = (
                    batch,
                    num_encoder_attention_heads,
                    encoder_seq_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                decoder_past_length = decoder_seq_length + 3
                decoder_shape = (
                    batch,
                    num_decoder_attention_heads,
                    decoder_past_length,
                    self._config.hidden_size // num_decoder_attention_heads,
                )
                common_inputs["decoder_attention_mask"] = torch.cat(
                    [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
                )
                common_inputs["past_key_values"] = []
                # If the number of encoder and decoder layers are present in the model configuration, both are considered
                num_encoder_layers, num_decoder_layers = self.num_layers
                min_num_layers = min(num_encoder_layers, num_decoder_layers)
                max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
                remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
                for _ in range(min_num_layers):
                    common_inputs["past_key_values"].append(
                        (
                            torch.zeros(decoder_shape),
                            torch.zeros(decoder_shape),
                            torch.zeros(encoder_shape),
                            torch.zeros(encoder_shape),
                        )
                    )
                # TODO: test this.
                shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
                for _ in range(min_num_layers, max_num_layers + min_num_layers):
                    common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ):
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                num_encoder_layers, _ = self.num_layers
                num_encoder_attention_heads, _ = self.num_attention_heads
                past_shape = (
                    batch,
                    num_encoder_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                mask_dtype = common_inputs["attention_mask"].dtype
                common_inputs["attention_mask"] = torch.cat(
                    [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
                )
                common_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
                ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
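# Hedged export sketch using the (now legacy) transformers.onnx API; the
# checkpoint below is illustrative:
#
#   from pathlib import Path
#   from transformers import AutoTokenizer, MarianMTModel
#   from transformers.onnx import export
#
#   model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   onnx_config = MarianOnnxConfig(model.config)
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))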
| 714 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
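# Non-interactive demo (illustrative inputs only): runs the same pipeline as
# main() on f(a, b) with minterms {0, 1, 2} and prints whatever implicants the
# routines above derive.
def _demo() -> None:
    binary = decimal_to_binary(2, [0.0, 1.0, 2.0])
    prime_implicants = check(binary)
    chart = prime_implicant_chart(prime_implicants, binary)
    print("Prime Implicants:", prime_implicants)
    print("Essential Prime Implicants:", selection(chart, prime_implicants))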
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 208 | 0 |
def solution(n: int = 1000) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
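# Worked example of the recurrence: starting from 1/1, successive expansions of
# sqrt(2) are (p + 2q)/(p + q): 3/2, 7/5, 17/12, 41/29, 99/70, 239/169, 577/408,
# 1393/985, ... The eighth expansion, 1393/985, is the first whose numerator has
# more digits than its denominator, so solution(8) == 1.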
if __name__ == "__main__":
print(f"""{solution() = }""")
| 307 |
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 307 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
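# What the checks above assert, in words: with N processes, process i holds a
# tensor of shape (i + 2, 10). pad_across_processes zero-pads every tensor along
# dim 0 up to the largest size, N + 1, appending the padding by default and
# prepending it when pad_first=True.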
| 38 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)
        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)
        sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn1_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn1, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn1, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, 1, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    nested_input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(nested_input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 38 | 1 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 1_0, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
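# A hedged reference sketch of the contiguous-split behaviour the first test
# checks (not the library's actual implementation):
def _distribute_shards_sketch(num_shards, max_num_jobs):
    num_jobs = min(num_shards, max_num_jobs)
    out, start = [], 0
    for i in range(num_jobs):
        # Earlier jobs absorb the remainder, one extra shard each.
        size = num_shards // num_jobs + (1 if i < num_shards % num_jobs else 0)
        out.append(range(start, start + size))
        start += size
    return out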
| 475 | """simple docstring"""
def excel_title_to_column(column_title: str) -> int:
    """Converts an Excel-style column title (e.g. "AB") to its 1-based column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
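
# Example values:
#   excel_title_to_column("A")  == 1
#   excel_title_to_column("Z")  == 26
#   excel_title_to_column("AA") == 27
#   excel_title_to_column("AB") == 28
#   excel_title_to_column("ZZ") == 26 * 26 + 26 == 702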
if __name__ == "__main__":
from doctest import testmod
testmod()
| 473 | 0 |
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}
# fmt: off
lowerCAmelCase : List[str] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
lowerCAmelCase : Dict = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
lowerCAmelCase : str = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
lowerCAmelCase : List[Any] = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
lowerCAmelCase : int = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
lowerCAmelCase : int = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
lowerCAmelCase : Union[str, Any] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
lowerCAmelCase : List[str] = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
lowerCAmelCase : Optional[int] = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
lowerCAmelCase : Any = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
lowerCAmelCase : Union[str, Any] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
lowerCAmelCase : str = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
lowerCAmelCase : List[str] = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
lowerCAmelCase : Optional[Any] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
lowerCAmelCase : int = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 715 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase : Optional[Any] = """sshleifer/bart-tiny-random"""
lowerCAmelCase : List[Any] = """patrickvonplaten/t5-tiny-random"""
@require_torch
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return AutoConfig.from_pretrained(_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , *lowerCamelCase = create_student_by_copying_alternating_layers(_a , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , *lowerCamelCase = create_student_by_copying_alternating_layers(_a , tempfile.mkdtemp() , e=1 , d=_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , *lowerCamelCase = create_student_by_copying_alternating_layers(_a , tempfile.mkdtemp() , e=1 , d=_a )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , *lowerCamelCase = create_student_by_copying_alternating_layers(_a , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with self.assertRaises(_a ):
create_student_by_copying_alternating_layers(_a , tempfile.mkdtemp() , e=_a , d=_a )
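
# A rough sketch of the layer-selection idea these tests exercise (this is an
# illustration, not the transformers implementation): to build an e-layer
# student encoder from an n-layer teacher, copy e evenly spaced teacher layers.
def pick_evenly_spaced_layers(n_teacher: int, n_student: int) -> list:
    step = n_teacher / n_student
    return [int(i * step) for i in range(n_student)]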
| 533 | 0 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
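
# In isolation, the corner-slice check used above: take the last channel of a
# (1, H, W, C) image, grab the bottom-right 3x3 corner, and compare it
# element-wise against a flat 9-value reference (zeros here, for illustration).
def _demo_slice_check():
    image = np.zeros((1, 32, 32, 3))
    image_slice = image[0, -3:, -3:, -1]  # shape (3, 3)
    expected_slice = np.zeros(9)
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2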
| 43 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""AI-Sweden/gpt-sw3-126m""": 2_048,
"""AI-Sweden/gpt-sw3-350m""": 2_048,
"""AI-Sweden/gpt-sw3-1.6b""": 2_048,
"""AI-Sweden/gpt-sw3-6.7b""": 2_048,
"""AI-Sweden/gpt-sw3-20b""": 2_048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Removes non-printing characters, normalizes whitespace, and applies NFC unicode normalization."""
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; no cleanup is performed."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) into a single string, keeping special tokens intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encodes text to token ids with the preprocessing above, skipping the slow tokenizer machinery."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes token ids directly with the sentencepiece model."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the prompt "<eos><bos>User: ...<bos>Bot: ...<bos>Bot:" from a Conversation and encodes it."""
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
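
# For illustration, the prompt produced by _build_conversation_input_ids for a
# default (non-7b) tokenizer, where eos_token == "<|endoftext|>" and
# bos_token == "<s>", given the turns ["Hi", "Hello", "Bye"]:
#   "<|endoftext|><s>User: Hi<s>Bot: Hello<s>User: Bye<s>Bot:"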
| 175 | 0 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)
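
# Example use of these entry points through torch.hub (assuming this file is
# the hubconf.py at the root of the huggingface/transformers checkout; the
# model id is just an example):
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   net = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")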
@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs) | 714 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
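
# Example invocation (the script filename is illustrative):
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 \
#       --push_to_hub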
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 266 | 0 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2 | 141 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
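
# Example (commented out to avoid side effects at import time):
#   config = BigBirdConfig(attention_type="original_full")       # dense attention
#   config = BigBirdConfig(block_size=64, num_random_blocks=3)   # block-sparse attention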
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        ) | 141 | 1 |
'''simple docstring'''
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result
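
# Worked example (the classic "healthy/fever" HMM): with
#   observations = ["normal", "cold", "dizzy"]
#   states = ["Healthy", "Fever"]
#   start_p = {"Healthy": 0.6, "Fever": 0.4}
#   trans_p = {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
#              "Fever": {"Healthy": 0.4, "Fever": 0.6}}
#   emit_p = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#             "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
# viterbi(observations, states, start_p, trans_p, emit_p) returns
# ["Healthy", "Healthy", "Fever"].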
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 389 | '''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Prints the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Prints the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Prints the full diamond, or a message if n is not positive."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
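
# For example, pretty_print(3) prints the diamond below (each star is printed
# as "* ", so every line also carries a trailing space):
#     *
#    * *
#   * * *
#   * * *
#    * *
#     *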
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
snake_case__ : int = 1
while K:
snake_case__ : List[str] = int(input('''enter the number and , and see the magic : '''))
print()
pretty_print(user_number)
snake_case__ : List[Any] = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 389 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_layoutlmv3": [
"LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LayoutLMv3Config",
"LayoutLMv3OnnxConfig",
],
"processing_layoutlmv3": ["LayoutLMv3Processor"],
"tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
"LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv3ForQuestionAnswering",
"LayoutLMv3ForSequenceClassification",
"LayoutLMv3ForTokenClassification",
"LayoutLMv3Model",
"LayoutLMv3PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
"TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLayoutLMv3ForQuestionAnswering",
"TFLayoutLMv3ForSequenceClassification",
"TFLayoutLMv3ForTokenClassification",
"TFLayoutLMv3Model",
"TFLayoutLMv3PreTrainedModel",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =["LayoutLMv3FeatureExtractor"]
__lowerCAmelCase =["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
__lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Applies the Koch iteration step the given number of times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replaces each segment with four shorter segments forming the Koch bump."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotates a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
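
    # Each iteration step replaces every segment with 4 shorter ones, so after
    # 5 iterations the initial 3 segments become 3 * 4**5 = 3072 segments
    # (3073 points in processed_vectors).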
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 697 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = (
            len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        )
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # keep only the traced modules that have learnable parameters
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfers the weights of `self.src` to `self.dest` by performing a forward pass with `x` as input and
        copying the traced, parametrized operations of both modules pairwise, in order.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """
    Wraps a vissl RegNet trunk, collecting the stem and the trunk blocks so features can be extracted.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    Maps a model name to a function that returns the loaded source model; names not set explicitly fall back to timm.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val
class NameToOurModelFuncMap(dict):
    """
    Maps a model name to our model class: seer checkpoints without an imagenet head use the base RegNetModel.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification

        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

    print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type="""x""" ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type="""x""" ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type="""x""" ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type="""x""" ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type="""x""" ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type="""x""" ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type="""x""" ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type="""x""" ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
SCREAMING_SNAKE_CASE = NameToOurModelFuncMap()
SCREAMING_SNAKE_CASE = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple[nn.Module, Dict]:
SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , model_dir=str(_SCREAMING_SNAKE_CASE ) , map_location="""cpu""" )
SCREAMING_SNAKE_CASE = model_func()
# check if we have a head, if yes add it
SCREAMING_SNAKE_CASE = files["""classy_state_dict"""]["""base_model"""]["""model"""]
SCREAMING_SNAKE_CASE = model_state_dict["""trunk"""]
model.load_state_dict(_SCREAMING_SNAKE_CASE )
return model.eval(), model_state_dict["heads"]
# pretrained
SCREAMING_SNAKE_CASE = partial(
_SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE = partial(
_SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE = partial(
_SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
SCREAMING_SNAKE_CASE = partial(
_SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch""" , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=10_10 , w_a=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
SCREAMING_SNAKE_CASE = partial(
_SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE = partial(
_SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE = partial(
_SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
SCREAMING_SNAKE_CASE = partial(
_SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch""" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
_SCREAMING_SNAKE_CASE , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
_SCREAMING_SNAKE_CASE , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 116 |
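For orientation, the SEER checkpoint-loading helper above reduces to a small pattern: fetch a state dict from a URL, unwrap the nested keys, and load the trunk weights into a freshly built module. A minimal sketch follows; the function name and the `build_model` argument are illustrative placeholders, not names from the source.

import torch

def load_trunk_from_url(url, build_model, cache_dir="checkpoints"):
    # Download the checkpoint (cached under cache_dir) and load it on CPU.
    files = torch.hub.load_state_dict_from_url(url, model_dir=cache_dir, map_location="cpu")
    model = build_model()
    # SEER checkpoints nest the weights under classy_state_dict/base_model/model;
    # "trunk" holds the backbone and "heads" holds any task heads.
    state_dict = files["classy_state_dict"]["base_model"]["model"]
    model.load_state_dict(state_dict["trunk"])
    return model.eval(), state_dict["heads"]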
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
'''simple docstring'''
_validate_point(_SCREAMING_SNAKE_CASE )
_validate_point(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) )
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
if point:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for item in point:
if not isinstance(_SCREAMING_SNAKE_CASE , (int, float) ):
SCREAMING_SNAKE_CASE = (
"""Expected a list of numbers as input, found """
F"""{type(_SCREAMING_SNAKE_CASE ).__name__}"""
)
raise TypeError(_SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE = F"""Expected a list of numbers as input, found {type(_SCREAMING_SNAKE_CASE ).__name__}"""
raise TypeError(_SCREAMING_SNAKE_CASE )
else:
raise ValueError("""Missing an input""" )
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
'''simple docstring'''
_validate_point(_SCREAMING_SNAKE_CASE )
_validate_point(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 116 | 1 |
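Restated with descriptive names, the distance helper above is simply the sum of element-wise absolute differences; this is a direct restatement of the code, not new behavior.

def manhattan_distance(point_a: list, point_b: list) -> float:
    # L1 distance between two points of equal dimensionality.
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))

# Example: manhattan_distance([1, 1], [9, 4]) == 11.0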
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class snake_case__ :
def __init__( self : Tuple , _A : List[Any] , _A : Dict=13 , _A : List[str]=7 , _A : Optional[int]=False , _A : Dict=True , _A : Union[str, Any]=False , _A : Dict=False , _A : Tuple=19 , _A : int=32 , _A : Dict=5 , _A : Any=4 , _A : List[Any]=37 , _A : int="gelu" , _A : Dict=0.1 , _A : List[str]=0.1 , _A : List[str]=5_12 , _A : List[Any]=16 , _A : Any=2 , _A : str=0.02 , _A : str=3 , _A : int=4 , _A : Any=None , ) -> int:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : List[str] = is_training
UpperCAmelCase_ : Optional[int] = use_input_mask
UpperCAmelCase_ : Dict = use_token_type_ids
UpperCAmelCase_ : Dict = use_labels
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Any = intermediate_size
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : Dict = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : List[str] = type_vocab_size
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : List[Any] = num_labels
UpperCAmelCase_ : List[Any] = num_choices
UpperCAmelCase_ : Tuple = scope
def A ( self : Union[str, Any] ) -> str:
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[str] = None
if self.use_input_mask:
UpperCAmelCase_ : str = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Tuple = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase_ : Dict = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=_A , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def A ( self : str , _A : Optional[int] , _A : List[Any] , _A : List[Any] , _A : Tuple , _A : Tuple , _A : str ) -> Union[str, Any]:
UpperCAmelCase_ : Any = EsmForProteinFolding(config=_A ).float()
model.to(_A )
model.eval()
UpperCAmelCase_ : List[Any] = model(_A , attention_mask=_A )
UpperCAmelCase_ : Dict = model(_A )
UpperCAmelCase_ : str = model(_A )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def A ( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
UpperCAmelCase_ : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
a_ = False
a_ = (EsmForProteinFolding,) if is_torch_available() else ()
a_ = ()
a_ = {} if is_torch_available() else {}
a_ = False
def A ( self : Any ) -> Tuple:
UpperCAmelCase_ : Dict = EsmFoldModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=_A , hidden_size=37 )
def A ( self : Tuple ) -> Any:
self.config_tester.run_common_tests()
def A ( self : List[str] ) -> List[Any]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
@unittest.skip('''Does not support attention outputs''' )
def A ( self : Optional[int] ) -> int:
pass
@unittest.skip
def A ( self : str ) -> Tuple:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def A ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def A ( self : List[str] ) -> Any:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def A ( self : Optional[Any] ) -> int:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def A ( self : Tuple ) -> List[str]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def A ( self : str ) -> List[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def A ( self : Tuple ) -> Any:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def A ( self : List[str] ) -> Tuple:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def A ( self : List[Any] ) -> int:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def A ( self : List[str] ) -> List[str]:
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def A ( self : Any ) -> List[str]:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def A ( self : List[Any] ) -> int:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def A ( self : Dict ) -> Dict:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def A ( self : List[Any] ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def A ( self : Optional[Any] ) -> Union[str, Any]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def A ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def A ( self : List[str] ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def A ( self : Optional[int] ) -> Any:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def A ( self : Optional[int] ) -> List[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A ( self : Optional[Any] ) -> List[Any]:
pass
@require_torch
class snake_case__ ( UpperCamelCase):
@slow
def A ( self : List[str] ) -> int:
UpperCAmelCase_ : Any = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
UpperCAmelCase_ : Any = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCAmelCase_ : Tuple = model(_A )['''positions''']
UpperCAmelCase_ : Optional[Any] = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , _A , atol=1e-4 ) )
| 541 |
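The shape assertions in the tester above follow one pattern; a minimal sketch (the helper name is illustrative), assuming as the test does that the model returns `positions` over 8 recycling iterations with 14 atoms per residue, and `angles` as 7 torsion angles stored as sin/cos pairs.

import torch

def check_folding_output_shapes(model, input_ids, attention_mask, batch_size, seq_length):
    model.eval()
    with torch.no_grad():
        result = model(input_ids, attention_mask=attention_mask)
    assert result.positions.shape == (8, batch_size, seq_length, 14, 3)
    assert result.angles.shape == (8, batch_size, seq_length, 7, 2)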
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def __UpperCAmelCase ( A : Optional[int] ) -> List[Any]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def __UpperCAmelCase ( A : str ) -> Optional[Any]:
# word like '180' or '身高' or '神'
for char in word:
UpperCAmelCase_ : str = ord(A )
if not _is_chinese_char(A ):
return 0
return 1
def __UpperCAmelCase ( A : List[str] ) -> Dict:
UpperCAmelCase_ : Optional[Any] = set()
for token in tokens:
UpperCAmelCase_ : str = len(A ) > 1 and is_chinese(A )
if chinese_word:
word_set.add(A )
UpperCAmelCase_ : Optional[int] = list(A )
return word_list
def __UpperCAmelCase ( A : List[str] , A : set ) -> Optional[Any]:
if not chinese_word_set:
return bert_tokens
UpperCAmelCase_ : Dict = max([len(A ) for w in chinese_word_set] )
UpperCAmelCase_ : List[str] = bert_tokens
    start , end = 0, len(A )
while start < end:
UpperCAmelCase_ : str = True
if is_chinese(bert_word[start] ):
UpperCAmelCase_ : str = min(end - start , A )
for i in range(A , 1 , -1 ):
UpperCAmelCase_ : Tuple = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
UpperCAmelCase_ : Union[str, Any] = '''##''' + bert_word[j]
UpperCAmelCase_ : Any = start + i
UpperCAmelCase_ : Optional[int] = False
break
if single_word:
start += 1
return bert_word
def __UpperCAmelCase ( A : List[str] , A : LTP , A : BertTokenizer ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = []
for i in range(0 , len(A ) , 1_0_0 ):
UpperCAmelCase_ : int = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['''cws'''] ).cws
UpperCAmelCase_ : Any = [get_chinese_word(A ) for r in res]
ltp_res.extend(A )
assert len(A ) == len(A )
UpperCAmelCase_ : Tuple = []
for i in range(0 , len(A ) , 1_0_0 ):
UpperCAmelCase_ : Optional[int] = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=A , truncation=A , max_length=5_1_2 )
bert_res.extend(res['''input_ids'''] )
assert len(A ) == len(A )
UpperCAmelCase_ : Any = []
for input_ids, chinese_word in zip(A , A ):
UpperCAmelCase_ : Union[str, Any] = []
for id in input_ids:
UpperCAmelCase_ : Union[str, Any] = bert_tokenizer._convert_id_to_token(A )
input_tokens.append(A )
UpperCAmelCase_ : List[str] = add_sub_symbol(A , A )
UpperCAmelCase_ : Any = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(A ):
if token[:2] == "##":
UpperCAmelCase_ : int = token[2:]
# save chinese tokens' pos
if len(A ) == 1 and _is_chinese_char(ord(A ) ):
ref_id.append(A )
ref_ids.append(A )
assert len(A ) == len(A )
return ref_ids
def __UpperCAmelCase ( A : List[Any] ) -> Tuple:
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : Optional[Any] = f.readlines()
UpperCAmelCase_ : List[str] = [line.strip() for line in data if len(A ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
UpperCAmelCase_ : Tuple = LTP(args.ltp ) # faster in GPU device
UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained(args.bert )
UpperCAmelCase_ : Optional[int] = prepare_ref(A , A , A )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : str = [json.dumps(A ) + '''\n''' for ref in ref_ids]
f.writelines(A )
if __name__ == "__main__":
_UpperCamelCase : Optional[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
_UpperCamelCase : Any = parser.parse_args()
main(args)
| 541 | 1 |
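The core of the sub-symbol helper above is a greedy longest-match over known Chinese words: at each position, try the longest candidate first and mark the non-initial characters of a match with "##" so whole-word masking treats the word as a unit. A restatement with descriptive names, relying on the `is_chinese` helper defined above; the logic mirrors the source.

def mark_subwords(bert_tokens, chinese_word_set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max(len(w) for w in chinese_word_set)
    tokens = list(bert_tokens)
    start, end = 0, len(tokens)
    while start < end:
        advanced = False
        if is_chinese(tokens[start]):
            # Longest match first, down to length 2.
            for i in range(min(end - start, max_word_len), 1, -1):
                if "".join(tokens[start : start + i]) in chinese_word_set:
                    for j in range(start + 1, start + i):
                        tokens[j] = "##" + tokens[j]
                    start += i
                    advanced = True
                    break
        if not advanced:
            start += 1
    return tokens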
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__magic_name__ = logging.getLogger(__name__)
def _A ( __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = False , ):
"""simple docstring"""
    load_in_8bit = bnb_quantization_config.load_in_8bit
    load_in_4bit = bnb_quantization_config.load_in_4bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
            """ make sure you have the latest version of `bitsandbytes` installed.""" )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
            """ make sure you have the latest version of `bitsandbytes` installed.""" )
lowerCamelCase__ = []
# custom device map
if isinstance(__lowercase , __lowercase ) and len(device_map.keys() ) > 1:
lowerCamelCase__ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCamelCase__ = get_keys_to_not_convert(__lowercase )
# add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
bnb_quantization_config.skip_modules.extend(__lowercase )
lowerCamelCase__ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCamelCase__ = []
lowerCamelCase__ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__lowercase )
# compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
lowerCamelCase__ = get_parameter_device(__lowercase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
lowerCamelCase__ = replace_with_bnb_layers(__lowercase , __lowercase , modules_to_not_convert=__lowercase )
# convert param to the right dtype
lowerCamelCase__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowerCamelCase__ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
lowerCamelCase__ = getattr(__lowercase , __lowercase , __lowercase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__lowercase ):
param.to(__lowercase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
lowerCamelCase__ = replace_with_bnb_layers(
__lowercase , __lowercase , modules_to_not_convert=__lowercase )
lowerCamelCase__ = get_quantized_model_device_map(
__lowercase , __lowercase , __lowercase , max_memory=__lowercase , no_split_module_classes=__lowercase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCamelCase__ = True
lowerCamelCase__ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
            __lowercase , __lowercase , __lowercase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__lowercase , offload_state_dict=__lowercase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_8bit_bnb=load_in_8bit and offload , )
return dispatch_model(__lowercase , device_map=__lowercase , offload_dir=__lowercase )
def _A ( __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
lowerCamelCase__ = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(__lowercase , __lowercase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
lowerCamelCase__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowerCamelCase__ = {}
lowerCamelCase__ = special_dtypes
lowerCamelCase__ = no_split_module_classes
lowerCamelCase__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCamelCase__ = get_balanced_memory(
__lowercase , low_zero=(device_map == """balanced_low_0""") , max_memory=__lowercase , **__lowercase , )
lowerCamelCase__ = max_memory
lowerCamelCase__ = infer_auto_device_map(__lowercase , **__lowercase )
if isinstance(__lowercase , __lowercase ):
# check if don't have any quantized module on the cpu
lowerCamelCase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCamelCase__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def _A ( __lowercase , __lowercase , __lowercase=None , __lowercase=None ):
"""simple docstring"""
if modules_to_not_convert is None:
lowerCamelCase__ = []
    model , has_been_replaced = _replace_with_bnb_layers(
__lowercase , __lowercase , __lowercase , __lowercase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _A ( __lowercase , __lowercase , __lowercase=None , __lowercase=None , ):
"""simple docstring"""
lowerCamelCase__ = False
for name, module in model.named_children():
if current_key_name is None:
lowerCamelCase__ = []
current_key_name.append(__lowercase )
if isinstance(__lowercase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCamelCase__ = """.""".join(__lowercase )
lowerCamelCase__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCamelCase__ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    lowerCamelCase__ = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=__lowercase , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    lowerCamelCase__ = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
lowerCamelCase__ = module.weight.data
if module.bias is not None:
lowerCamelCase__ = module.bias.data
bnb_module.requires_grad_(__lowercase )
setattr(__lowercase , __lowercase , __lowercase )
lowerCamelCase__ = True
if len(list(module.children() ) ) > 0:
            _ , _has_been_replaced = _replace_with_bnb_layers(
__lowercase , __lowercase , __lowercase , __lowercase )
lowerCamelCase__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _A ( __lowercase ):
"""simple docstring"""
with init_empty_weights():
        lowerCamelCase__ = deepcopy(__lowercase )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
lowerCamelCase__ = find_tied_parameters(__lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowercase , __lowercase ):
lowerCamelCase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCamelCase__ = sum(__lowercase , [] )
lowerCamelCase__ = len(__lowercase ) > 0
# Check if it is a base model
lowerCamelCase__ = False
if hasattr(__lowercase , """base_model_prefix""" ):
lowerCamelCase__ = not hasattr(__lowercase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCamelCase__ = list(model.named_children() )
lowerCamelCase__ = [list_modules[-1][0]]
# add last module together with tied weights
lowerCamelCase__ = set(__lowercase ) - set(__lowercase )
lowerCamelCase__ = list(set(__lowercase ) ) + list(__lowercase )
# remove ".weight" from the keys
lowerCamelCase__ = [""".weight""", """.bias"""]
lowerCamelCase__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCamelCase__ = name.replace(__lowercase , """""" )
filtered_module_names.append(__lowercase )
return filtered_module_names
def _A ( __lowercase ):
"""simple docstring"""
for m in model.modules():
        if isinstance(__lowercase , bnb.nn.Linear4bit ):
return True
return False
def _A ( __lowercase ):
"""simple docstring"""
return next(parameter.parameters() ).device
def _A ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(__lowercase , __lowercase , 0 , dtype=__lowercase , value=__lowercase )
lowerCamelCase__ = param_name
lowerCamelCase__ = model
if "." in tensor_name:
lowerCamelCase__ = tensor_name.split(""".""" )
for split in splits[:-1]:
lowerCamelCase__ = getattr(__lowercase , __lowercase )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
lowerCamelCase__ = new_module
lowerCamelCase__ = splits[-1]
# offload weights
lowerCamelCase__ = False
offload_weight(module._parameters[tensor_name] , __lowercase , __lowercase , index=__lowercase )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , __lowercase , index=__lowercase , )
else:
offload_weight(__lowercase , __lowercase , __lowercase , index=__lowercase )
offload_weight(__lowercase , param_name.replace("""weight""" , """SCB""" ) , __lowercase , index=__lowercase )
set_module_tensor_to_device(__lowercase , __lowercase , """meta""" , dtype=__lowercase , value=torch.empty(*param.size() ) )
| 258 |
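Stripped of device-map and offload handling, the layer-replacement step above amounts to the following; a simplified sketch assuming bitsandbytes is installed (it skips the dotted-path matching on module names and the 4-bit branch that the full version performs).

import torch.nn as nn
import bitsandbytes as bnb

def swap_linear_for_8bit(model: nn.Module, skip=frozenset()):
    # Recursively replace nn.Linear children with Linear8bitLt, carrying
    # over the existing weights and bias; quantization happens when the
    # module is later moved to a CUDA device.
    for name, module in model.named_children():
        if isinstance(module, nn.Linear) and name not in skip:
            new_module = bnb.nn.Linear8bitLt(
                module.in_features,
                module.out_features,
                bias=module.bias is not None,
                has_fp16_weights=False,
            )
            new_module.weight.data = module.weight.data
            if module.bias is not None:
                new_module.bias.data = module.bias.data
            setattr(model, name, new_module)
        else:
            swap_linear_for_8bit(module, skip)
    return model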
"""simple docstring"""
from manim import *
class SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
def __UpperCAmelCase ( self : int ):
lowerCamelCase__ = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase__ = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
lowerCamelCase__ = Rectangle(height=0.2_5 , width=0.2_5 )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCamelCase__ = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCamelCase__ = Text("""CPU""" , font_size=24 )
lowerCamelCase__ = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = [mem.copy() for i in range(4 )]
lowerCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCamelCase__ = Text("""GPU""" , font_size=24 )
lowerCamelCase__ = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
gpu.move_to([-1, -1, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCamelCase__ = Text("""Model""" , font_size=24 )
lowerCamelCase__ = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
model.move_to([3, -1.0, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = []
lowerCamelCase__ = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase__ = fill.copy().set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.8 )
target.move_to(SCREAMING_SNAKE_CASE_ )
model_arr.append(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(SCREAMING_SNAKE_CASE_ )
self.add(*SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase__ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCamelCase__ = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
lowerCamelCase__ = Text("""Disk""" , font_size=24 )
lowerCamelCase__ = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
disk.move_to([-4, -1.2_5, 0] )
self.add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(SCREAMING_SNAKE_CASE_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase__ = Square(0.3 )
input.set_fill(SCREAMING_SNAKE_CASE_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , SCREAMING_SNAKE_CASE_ , buff=0.5 )
self.play(Write(SCREAMING_SNAKE_CASE_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=SCREAMING_SNAKE_CASE_ , buff=0.0_2 )
self.play(MoveToTarget(SCREAMING_SNAKE_CASE_ ) )
self.play(FadeOut(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase__ = Arrow(start=SCREAMING_SNAKE_CASE_ , end=SCREAMING_SNAKE_CASE_ , color=SCREAMING_SNAKE_CASE_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , SCREAMING_SNAKE_CASE_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCamelCase__ = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=3 ) )
lowerCamelCase__ = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.0_2}
self.play(
Write(SCREAMING_SNAKE_CASE_ ) , Circumscribe(model_arr[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(model_cpu_arr[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCamelCase__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , SCREAMING_SNAKE_CASE_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
lowerCamelCase__ = AnimationGroup(
FadeOut(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , MoveToTarget(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , FadeIn(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(SCREAMING_SNAKE_CASE_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCamelCase__ = 0.7
self.play(
Circumscribe(model_arr[i] , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(cpu_left_col_base[i] , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(model_arr[i + 1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(cpu_left_col_base[-1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCamelCase__ = a_c
lowerCamelCase__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(SCREAMING_SNAKE_CASE_ ) , FadeOut(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , )
lowerCamelCase__ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=3 ) , MoveToTarget(SCREAMING_SNAKE_CASE_ ) )
self.wait()
| 258 | 1 |
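The animation above is a large instance of the basic Manim recipe: build mobjects, position them relative to each other, then play animations inside `construct`. A minimal sketch of that recipe; the scene name and labels are illustrative, not from the source.

from manim import DOWN, FadeOut, Scene, Square, Text, Write

class MinimalMemoryScene(Scene):
    def construct(self):
        box = Square(side_length=1.0)
        label = Text("GPU", font_size=24)
        label.next_to(box, DOWN, buff=0.2)
        self.play(Write(box), Write(label))
        self.wait()
        self.play(FadeOut(box), FadeOut(label))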
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
UpperCAmelCase = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
UpperCAmelCase = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class lowercase__ ( A_ ):
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase = RoFormerTokenizer
def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="[UNK]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="[PAD]" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
super().__init__(
SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , tokenize_chinese_chars=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
pre_tok_state.get("""lowercase""" , SCREAMING_SNAKE_CASE) != do_lower_case
or pre_tok_state.get("""strip_accents""" , SCREAMING_SNAKE_CASE) != strip_accents
):
_lowerCamelCase : str = getattr(SCREAMING_SNAKE_CASE , pre_tok_state.pop("""type"""))
_lowerCamelCase : List[str] = do_lower_case
_lowerCamelCase : str = strip_accents
_lowerCamelCase : Optional[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : Any = do_lower_case
def __getstate__( self) -> str:
_lowerCamelCase : Any = self.__dict__.copy()
_lowerCamelCase : Dict = BertPreTokenizer()
return state
def __setstate__( self , SCREAMING_SNAKE_CASE) -> Union[str, Any]:
_lowerCamelCase : Tuple = d
_lowerCamelCase : List[Any] = self.__dict__["""_tokenizer"""].get_vocab()
_lowerCamelCase : Optional[int] = PreTokenizer.custom(JiebaPreTokenizer(SCREAMING_SNAKE_CASE))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None) -> Any:
        _lowerCamelCase : List[Any] = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
return output
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> List[int]:
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Optional[Any] = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> Tuple[str]:
_lowerCamelCase : Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE)
return tuple(SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
_lowerCamelCase : List[Any] = BertPreTokenizer()
return super().save_pretrained(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
| 88 | """simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1_3 , _SCREAMING_SNAKE_CASE=3_0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=3_2 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=3_7 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1_0 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=None , ) -> Tuple:
a_ : List[Any] = parent
a_ : Any = batch_size
a_ : Optional[int] = image_size
a_ : Optional[int] = patch_size
a_ : Any = num_channels
a_ : int = is_training
a_ : Dict = use_labels
a_ : Dict = hidden_size
a_ : List[str] = num_hidden_layers
a_ : str = num_attention_heads
a_ : Tuple = intermediate_size
a_ : Tuple = hidden_act
a_ : Union[str, Any] = hidden_dropout_prob
a_ : Dict = attention_probs_dropout_prob
a_ : List[str] = type_sequence_label_size
a_ : Tuple = initializer_range
a_ : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a_ : Tuple = (image_size // patch_size) ** 2
a_ : Optional[int] = num_patches + 1
def A ( self ) -> str:
a_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ : Dict = None
if self.use_labels:
a_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def A ( self ) -> Optional[int]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
a_ : Tuple = ViTMSNModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
a_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
a_ : Any = self.type_sequence_label_size
a_ : Union[str, Any] = ViTMSNForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
a_ : Optional[int] = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print("Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a_ : str = 1
a_ : Dict = ViTMSNForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
a_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a_ : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A ( self ) -> List[str]:
a_ : str = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
a_ : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : str = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
lowerCAmelCase__ : List[str] = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ : int = False
lowerCAmelCase__ : Optional[int] = False
lowerCAmelCase__ : Tuple = False
lowerCAmelCase__ : List[str] = False
def A ( self ) -> int:
a_ : Dict = ViTMSNModelTester(self )
a_ : Optional[Any] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def A ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def A ( self ) -> List[Any]:
pass
def A ( self ) -> str:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Optional[int] = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a_ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def A ( self ) -> Optional[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Tuple = model_class(_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : Tuple = [*signature.parameters.keys()]
a_ : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def A ( self ) -> str:
a_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def A ( self ) -> Tuple:
a_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def A ( self ) -> List[str]:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : Optional[Any] = ViTMSNModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ () -> Dict:
a_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A ( self ) -> Dict:
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def A ( self ) -> Optional[Any]:
torch.manual_seed(2 )
a_ : Union[str, Any] = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(_SCREAMING_SNAKE_CASE )
a_ : Dict = self.default_image_processor
a_ : Any = prepare_img()
a_ : Dict = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
a_ : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
a_ : Optional[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
a_ : List[Any] = torch.tensor([-0.0_8_0_3, -0.4_4_5_4, -0.2_3_7_5] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 473 | 0 |
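The integration check at the end of the test above is the standard pattern for pinning model outputs: compare a small slice of the logits against hard-coded reference values within an absolute tolerance. A minimal sketch; the reference values are the ones hard-coded in the test above, and the helper name is illustrative.

import torch

def assert_logits_match(outputs, expected_slice, atol=1e-4):
    # Shape check first, then numerical closeness on a representative slice.
    assert outputs.logits.shape == (1, 1000)
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=atol)

# Usage: assert_logits_match(model(**inputs), torch.tensor([-0.0803, -0.4454, -0.2375]))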
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class UpperCAmelCase ( nn.Module ):
'''simple docstring'''
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 0.0
lowerCAmelCase_ = 1
lowerCAmelCase_ = 1
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = jnp.floataa
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = []
snake_case_ = []
for i in range(self.num_layers ):
snake_case_ = self.in_channels if i == 0 else self.out_channels
snake_case_ = FlaxResnetBlockaD(
in_channels=UpperCAmelCase__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase__ )
snake_case_ = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase__ )
snake_case_ = resnets
snake_case_ = attentions
if self.add_downsample:
snake_case_ = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[int] , __lowercase : Any , __lowercase : str , __lowercase : List[Any] , __lowercase : str=True ):
"""simple docstring"""
snake_case_ = ()
for resnet, attn in zip(self.resnets , self.attentions ):
snake_case_ = resnet(UpperCAmelCase__ , UpperCAmelCase__ , deterministic=UpperCAmelCase__ )
snake_case_ = attn(UpperCAmelCase__ , UpperCAmelCase__ , deterministic=UpperCAmelCase__ )
output_states += (hidden_states,)
if self.add_downsample:
snake_case_ = self.downsamplers_a(UpperCAmelCase__ )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCAmelCase ( nn.Module ):
'''simple docstring'''
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 0.0
lowerCAmelCase_ = 1
lowerCAmelCase_ = True
lowerCAmelCase_ = jnp.floataa
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = []
for i in range(self.num_layers ):
snake_case_ = self.in_channels if i == 0 else self.out_channels
snake_case_ = FlaxResnetBlockaD(
in_channels=UpperCAmelCase__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase__ )
snake_case_ = resnets
if self.add_downsample:
snake_case_ = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : Tuple , __lowercase : List[str]=True ):
"""simple docstring"""
snake_case_ = ()
for resnet in self.resnets:
snake_case_ = resnet(UpperCAmelCase__ , UpperCAmelCase__ , deterministic=UpperCAmelCase__ )
output_states += (hidden_states,)
if self.add_downsample:
snake_case_ = self.downsamplers_a(UpperCAmelCase__ )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCAmelCase ( nn.Module ):
'''simple docstring'''
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 0.0
lowerCAmelCase_ = 1
lowerCAmelCase_ = 1
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = jnp.floataa
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = []
snake_case_ = []
for i in range(self.num_layers ):
snake_case_ = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case_ = self.prev_output_channel if i == 0 else self.out_channels
snake_case_ = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase__ )
snake_case_ = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase__ )
snake_case_ = resnets
snake_case_ = attentions
if self.add_upsample:
snake_case_ = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[Any] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[Any] , __lowercase : Optional[Any]=True ):
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
snake_case_ = res_hidden_states_tuple[-1]
snake_case_ = res_hidden_states_tuple[:-1]
snake_case_ = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case_ = resnet(UpperCAmelCase__ , UpperCAmelCase__ , deterministic=UpperCAmelCase__ )
snake_case_ = attn(UpperCAmelCase__ , UpperCAmelCase__ , deterministic=UpperCAmelCase__ )
if self.add_upsample:
snake_case_ = self.upsamplers_a(UpperCAmelCase__ )
return hidden_states
class UpperCAmelCase ( nn.Module ):
'''simple docstring'''
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 0.0
lowerCAmelCase_ = 1
lowerCAmelCase_ = True
lowerCAmelCase_ = jnp.floataa
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = []
for i in range(self.num_layers ):
snake_case_ = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case_ = self.prev_output_channel if i == 0 else self.out_channels
snake_case_ = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase__ )
snake_case_ = resnets
if self.add_upsample:
snake_case_ = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[int] , __lowercase : int , __lowercase : List[str] , __lowercase : Any , __lowercase : str=True ):
"""simple docstring"""
for resnet in self.resnets:
# pop res hidden states
snake_case_ = res_hidden_states_tuple[-1]
snake_case_ = res_hidden_states_tuple[:-1]
snake_case_ = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case_ = resnet(UpperCAmelCase__ , UpperCAmelCase__ , deterministic=UpperCAmelCase__ )
if self.add_upsample:
snake_case_ = self.upsamplers_a(UpperCAmelCase__ )
return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]
        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
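
# Illustrative sketch (added; not part of the diffusers file): how the up blocks
# above consume skip connections. Shapes are hypothetical, channels-last, matching
# the axis=-1 concatenation used in the loops above.
import jax.numpy as jnp

hidden_states = jnp.zeros((1, 8, 8, 4))  # (batch, height, width, channels)
res_hidden_states_tuple = (jnp.zeros((1, 8, 8, 2)), jnp.zeros((1, 8, 8, 3)))

# Pop the most recent skip tensor and stack it onto the channel axis,
# exactly as each loop iteration above does.
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
assert hidden_states.shape == (1, 8, 8, 7)  # 4 + 3 channels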
| 700 |
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n``."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
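    # Quick sanity checks (added for illustration; not in the original script):
    # 13195 = 5 * 7 * 13 * 29, and 6857 is the known answer for the default input.
    assert solution(13195) == 29
    assert solution(600851475143) == 6857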
| 139 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "sentencepiece.model"}
UpperCamelCase_ = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
UpperCamelCase_ = {
"google/rembert": 2_5_6,
}
class a ( __UpperCAmelCase ):
lowercase_ : Optional[int] = VOCAB_FILES_NAMES
lowercase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int]=False , snake_case__ : Dict=True , snake_case__ : Optional[int]=True , snake_case__ : Optional[int]="[CLS]" , snake_case__ : str="[SEP]" , snake_case__ : Union[str, Any]="[UNK]" , snake_case__ : List[Any]="[SEP]" , snake_case__ : Any="[PAD]" , snake_case__ : Union[str, Any]="[CLS]" , snake_case__ : Dict="[MASK]" , **snake_case__ : List[Any] , ):
"""simple docstring"""
super().__init__(
do_lower_case=snake_case__ , remove_space=snake_case__ , keep_accents=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = remove_space
__lowerCAmelCase = keep_accents
__lowerCAmelCase = vocab_file
__lowerCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(snake_case__ )
@property
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
return len(self.sp_model )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__lowerCAmelCase = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ):
"""simple docstring"""
__lowerCAmelCase = self.__dict__.copy()
__lowerCAmelCase = None
return state
def __setstate__( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
__lowerCAmelCase = d
__lowerCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Dict , snake_case__ : Any=False ):
"""simple docstring"""
__lowerCAmelCase = self.sp_model.EncodeAsPieces(snake_case__ )
return pieces
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Tuple ):
"""simple docstring"""
return self.sp_model.PieceToId(snake_case__ )
def UpperCAmelCase__ ( self : Any , snake_case__ : Any ):
"""simple docstring"""
return self.sp_model.IdToPiece(snake_case__ )
def UpperCAmelCase__ ( self : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
__lowerCAmelCase = self.sp_model.decode_pieces(snake_case__ )
return out_string
def UpperCAmelCase__ ( self : Dict , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ ( self : Any , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1]
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self : Dict , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error("Vocabulary path ({}) should be a directory".format(snake_case__ ) )
return
__lowerCAmelCase = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
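
# Illustrative sketch (added; separate from the tokenizer above): the sequence
# layout that the special-token methods above produce, shown with plain lists.
# The ids 101/102 are hypothetical stand-ins for the [CLS]/[SEP] token ids.
def build_pair(cls_id, sep_id, ids_a, ids_b=None):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]

assert build_pair(101, 102, [7, 8]) == [101, 7, 8, 102]
assert build_pair(101, 102, [7, 8], [9]) == [101, 7, 8, 102, 9, 102]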
| 611 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fb, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fc:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fb.write(images["url"] + "\n")
                    fc.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 611 | 1 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=9_9 , __SCREAMING_SNAKE_CASE=2_4 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_1_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1_0_0_0 , ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Dict = parent
UpperCamelCase__ : Union[str, Any] = batch_size
UpperCamelCase__ : Optional[int] = seq_length
UpperCamelCase__ : Optional[Any] = is_training
UpperCamelCase__ : int = use_input_mask
UpperCamelCase__ : Dict = use_token_type_ids
UpperCamelCase__ : Tuple = use_labels
UpperCamelCase__ : str = vocab_size
UpperCamelCase__ : Union[str, Any] = hidden_size
UpperCamelCase__ : Optional[Any] = num_hidden_layers
UpperCamelCase__ : List[Any] = num_attention_heads
UpperCamelCase__ : Any = intermediate_size
UpperCamelCase__ : Any = hidden_act
UpperCamelCase__ : Dict = hidden_dropout_prob
UpperCamelCase__ : str = attention_probs_dropout_prob
UpperCamelCase__ : Dict = max_position_embeddings
UpperCamelCase__ : str = type_vocab_size
UpperCamelCase__ : Union[str, Any] = type_sequence_label_size
UpperCamelCase__ : Optional[Any] = initializer_range
UpperCamelCase__ : List[str] = num_labels
UpperCamelCase__ : Optional[int] = scope
UpperCamelCase__ : str = range_bbox
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
UpperCamelCase__ : Dict = None
if self.use_input_mask:
UpperCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase__ : Optional[int] = None
if self.use_token_type_ids:
UpperCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Any = None
if self.use_labels:
UpperCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ : List[str] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Dict = LiltModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : Dict = model(__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = model(__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = model(__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.num_labels
UpperCamelCase__ : int = LiltForTokenClassification(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : Any = model(
__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Tuple = LiltForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : Tuple = model(
__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : int = self.prepare_config_and_inputs()
(
UpperCamelCase__
) : Union[str, Any] = config_and_inputs
UpperCamelCase__ : Union[str, Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
return True
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : int = LiltModelTester(self )
UpperCamelCase__ : List[str] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase__ : Any = type
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Dict = LiltModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_torch
@slow
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : List[str] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = torch.tensor([[1, 2]] , device=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase__ : str = model(input_ids=__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = torch.Size([1, 2, 7_6_8] )
UpperCamelCase__ : Dict = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=__SCREAMING_SNAKE_CASE , )
self.assertTrue(outputs.last_hidden_state.shape , __SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
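
# Standalone sketch (added for illustration) of the bbox-legality trick used in
# the model tester above: swap coordinates so that x0 <= x1 and y0 <= y1.
def make_bbox_legal(box):
    x0, y0, x1, y1 = box
    if x1 < x0:
        x0, x1 = x1, x0
    if y1 < y0:
        y0, y1 = y1, y0
    return [x0, y0, x1, y1]

assert make_bbox_legal([5, 6, 1, 2]) == [1, 2, 5, 6]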
| 717 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
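
# Minimal sketch (added for illustration) of the key-renaming pass above, using a
# toy state dict instead of real model weights.
demo_key_map = {"downsample_blocks": "down_blocks", "upsample_blocks": "up_blocks"}
demo_state = {"downsample_blocks.0.weight": 1, "mid.weight": 2}
demo_renamed = {}
for demo_key, demo_value in demo_state.items():
    demo_prefix = demo_key.split(".")[0]
    demo_new = demo_key_map.get(demo_prefix, demo_prefix)
    demo_renamed[".".join([demo_new] + demo_key.split(".")[1:])] = demo_value
assert sorted(demo_renamed) == ["down_blocks.0.weight", "mid.weight"]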
| 462 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    """Graph data structure for Boruvka's minimum spanning tree algorithm."""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
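
# Example usage (added for illustration; weights chosen arbitrarily). On this
# triangle Boruvka keeps edges (1, 2) and (0, 2), for a total weight of 3:
#
#     graph = Graph(3)
#     graph.add_edge(0, 1, 5)
#     graph.add_edge(1, 2, 1)
#     graph.add_edge(0, 2, 2)
#     graph.boruvka()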
def _lowerCamelCase ( ):
pass
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 200 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
__snake_case = 'Hello world! cécé herlolip'
def _lowerCamelCase ( lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : bool ):
lowercase__ : int = FairseqRobertaModel.from_pretrained(lowerCamelCase__ )
roberta.eval() # disable dropout
lowercase__ : Tuple = roberta.model.encoder.sentence_encoder
lowercase__ : Tuple = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
lowercase__ : Any = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" , lowerCamelCase__ )
lowercase__ : List[Any] = XLMRobertaXLForSequenceClassification(lowerCamelCase__ ) if classification_head else XLMRobertaXLForMaskedLM(lowerCamelCase__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowercase__ : int = roberta_sent_encoder.embed_tokens.weight
lowercase__ : Union[str, Any] = roberta_sent_encoder.embed_positions.weight
lowercase__ : int = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
lowercase__ : int = roberta_sent_encoder.layer_norm.weight
lowercase__ : List[Any] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowercase__ : BertLayer = model.roberta.encoder.layer[i]
lowercase__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
lowercase__ : RobertaAttention = layer.attention
lowercase__ : str = roberta_layer.self_attn_layer_norm.weight
lowercase__ : Union[str, Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
lowercase__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
lowercase__ : Optional[Any] = roberta_layer.self_attn.q_proj.weight
lowercase__ : str = roberta_layer.self_attn.q_proj.bias
lowercase__ : Optional[int] = roberta_layer.self_attn.k_proj.weight
lowercase__ : Optional[int] = roberta_layer.self_attn.k_proj.bias
lowercase__ : int = roberta_layer.self_attn.v_proj.weight
lowercase__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
lowercase__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
lowercase__ : Any = roberta_layer.self_attn.out_proj.weight
lowercase__ : Optional[int] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
lowercase__ : Optional[Any] = roberta_layer.final_layer_norm.weight
lowercase__ : Any = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
if classification_head:
lowercase__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.weight
lowercase__ : str = roberta.model.classification_heads["""mnli"""].dense.bias
lowercase__ : str = roberta.model.classification_heads["""mnli"""].out_proj.weight
lowercase__ : List[str] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
lowercase__ : Tuple = roberta.model.encoder.lm_head.dense.weight
lowercase__ : int = roberta.model.encoder.lm_head.dense.bias
lowercase__ : Any = roberta.model.encoder.lm_head.layer_norm.weight
lowercase__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.bias
lowercase__ : Dict = roberta.model.encoder.lm_head.weight
lowercase__ : List[Any] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowercase__ : torch.Tensor = roberta.encode(lowerCamelCase__ ).unsqueeze(0 ) # batch of size 1
lowercase__ : Any = model(lowerCamelCase__ )[0]
if classification_head:
lowercase__ : Optional[Any] = roberta.model.classification_heads["""mnli"""](roberta.extract_features(lowerCamelCase__ ) )
else:
lowercase__ : Tuple = roberta.model(lowerCamelCase__ )[0]
print(our_output.shape , their_output.shape )
lowercase__ : Tuple = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
lowercase__ : int = torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(lowerCamelCase__ ).mkdir(parents=lowerCamelCase__ , exist_ok=lowerCamelCase__ )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
__snake_case = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 200 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
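
# Minimal sketch (added for illustration) of the lazy-import idea behind
# _LazyModule, using PEP 562 module-level __getattr__. The submodule names here
# are hypothetical and not part of the transformers API.
import importlib

_DEMO_SUBMODULES = {"json", "csv"}  # resolved only on first attribute access

def __getattr__(name):
    if name in _DEMO_SUBMODULES:
        return importlib.import_module(name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")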
| 706 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {'''vocab_file''': '''vocab.txt'''}
__lowerCamelCase : Union[str, Any] = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
__lowerCamelCase : Optional[Any] = {
'''openbmb/cpm-ant-10b''': 1024,
}
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = collections.OrderedDict()
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" ) as reader:
SCREAMING_SNAKE_CASE__ = reader.readlines()
for index, token in enumerate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE__ = token.rstrip("""\n""" )
SCREAMING_SNAKE_CASE__ = index
return vocab
class __snake_case ( lowerCamelCase_ ):
def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : int="<unk>" , _lowercase : int=2_00 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = vocab
SCREAMING_SNAKE_CASE__ = unk_token
SCREAMING_SNAKE_CASE__ = max_input_chars_per_word
def __a ( self : Optional[int] , _lowercase : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = list(_lowercase )
if len(_lowercase ) > self.max_input_chars_per_word:
return [self.unk_token]
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = []
while start < len(_lowercase ):
SCREAMING_SNAKE_CASE__ = len(_lowercase )
SCREAMING_SNAKE_CASE__ = None
while start < end:
SCREAMING_SNAKE_CASE__ = """""".join(chars[start:end] )
if substr in self.vocab:
SCREAMING_SNAKE_CASE__ = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_lowercase )
SCREAMING_SNAKE_CASE__ = end
return sub_tokens
class __snake_case ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ["input_ids", "attention_mask"]
lowerCAmelCase_ = False
def __init__( self : int , _lowercase : str , _lowercase : List[Any]="<d>" , _lowercase : List[Any]="</d>" , _lowercase : Union[str, Any]="<s>" , _lowercase : List[str]="</s>" , _lowercase : str="<pad>" , _lowercase : int="<unk>" , _lowercase : List[str]="</n>" , _lowercase : Tuple="</_>" , _lowercase : Any="left" , **_lowercase : Any , ):
"""simple docstring"""
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=_lowercase , eod_token=_lowercase , bos_token=_lowercase , eos_token=_lowercase , pad_token=_lowercase , unk_token=_lowercase , line_token=_lowercase , space_token=_lowercase , padding_side=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ = bod_token
SCREAMING_SNAKE_CASE__ = eod_token
SCREAMING_SNAKE_CASE__ = load_vocab(_lowercase )
SCREAMING_SNAKE_CASE__ = self.encoder[space_token]
SCREAMING_SNAKE_CASE__ = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
SCREAMING_SNAKE_CASE__ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _lowercase : x[1] ) )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE__ = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __a ( self : Optional[Any] ):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __a ( self : List[Any] ):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __a ( self : Any ):
"""simple docstring"""
return self.encoder["\n"]
@property
def __a ( self : Union[str, Any] ):
"""simple docstring"""
return len(self.encoder )
def __a ( self : int ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : Union[str, Any] , _lowercase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = []
for x in jieba.cut(_lowercase , cut_all=_lowercase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_lowercase ) )
return output_tokens
def __a ( self : int , _lowercase : Any , **_lowercase : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [i for i in token_ids if i >= 0]
SCREAMING_SNAKE_CASE__ = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_lowercase , **_lowercase )
def __a ( self : Optional[int] , _lowercase : List[Any] ):
"""simple docstring"""
return token in self.encoder
def __a ( self : List[str] , _lowercase : List[str] ):
"""simple docstring"""
return "".join(_lowercase )
def __a ( self : Optional[int] , _lowercase : Any ):
"""simple docstring"""
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def __a ( self : Tuple , _lowercase : List[Any] ):
"""simple docstring"""
return self.decoder.get(_lowercase , self.unk_token )
def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ):
"""simple docstring"""
if os.path.isdir(_lowercase ):
SCREAMING_SNAKE_CASE__ = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
SCREAMING_SNAKE_CASE__ = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
SCREAMING_SNAKE_CASE__ = 0
if " " in self.encoder:
SCREAMING_SNAKE_CASE__ = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
SCREAMING_SNAKE_CASE__ = self.encoder["""\n"""]
del self.encoder["\n"]
SCREAMING_SNAKE_CASE__ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _lowercase : x[1] ) )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
""" Please check that the vocabulary is not corrupted!""" )
SCREAMING_SNAKE_CASE__ = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def __a ( self : int , _lowercase : List[int] , _lowercase : List[int] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __a ( self : Union[str, Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase ))
return [1] + ([0] * len(_lowercase ))
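
# Standalone sketch (added for illustration) of the greedy longest-match loop in
# the WordpieceTokenizer above, run against a toy vocabulary.
def greedy_tokenize(word, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            if word[start:end] in vocab:
                cur = word[start:end]
                break
            end -= 1
        if cur is None:
            tokens.append(unk)
            start += 1
        else:
            tokens.append(cur)
            start = end
    return tokens

assert greedy_tokenize("unhappy", {"un", "happy", "hap"}) == ["un", "happy"]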
| 379 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCAmelCase = '<<<<<<< This should probably be modified because it mentions: '
lowerCAmelCase = '=======\n>>>>>>>\n'
lowerCAmelCase = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
lowerCAmelCase = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class A ( A_ ):
@staticmethod
def _A (lowerCAmelCase ):
__lowercase= parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=A__ , required=A__ , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=A__ , required=A__ , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=A__ )
def __init__(self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= get_logger('datasets-cli/converting' )
__lowercase= tfds_path
__lowercase= datasets_directory
def _A (self ):
if os.path.isdir(self._tfds_path ):
__lowercase= os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase= os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
__lowercase= os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
__lowercase= []
__lowercase= []
__lowercase= {}
if os.path.isdir(self._tfds_path ):
__lowercase= os.listdir(A__ )
else:
__lowercase= [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
__lowercase= os.path.join(A__ , A__ )
__lowercase= os.path.join(A__ , A__ )
if not os.path.isfile(A__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(A__ , encoding='utf-8' ) as f:
__lowercase= f.readlines()
__lowercase= []
__lowercase= False
__lowercase= False
__lowercase= []
for line in lines:
__lowercase= line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase= """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
__lowercase= """"""
continue
elif "from absl import logging" in out_line:
__lowercase= """from datasets import logging\n"""
elif "getLogger" in out_line:
__lowercase= out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase= True
__lowercase= list(filter(lambda lowerCAmelCase : e in out_line , A__ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(A__ ) + '\n' )
out_lines.append(A__ )
out_lines.append(A__ )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase= re.sub(A__ , A__ , A__ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase= re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , A__ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
__lowercase= """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase= True
out_lines.append(A__ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase= f_name.replace('.py' , '' )
__lowercase= os.path.join(A__ , A__ )
__lowercase= os.path.join(A__ , A__ )
os.makedirs(A__ , exist_ok=A__ )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(A__ )
if needs_manual_update:
with_manual_update.append(A__ )
with open(A__ , 'w' , encoding='utf-8' ) as f:
f.writelines(A__ )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
__lowercase= os.path.basename(A__ )
__lowercase= imports_to_builder_map[f_name.replace('.py' , '' )]
self._logger.info(f'Moving {dest_folder} to {utils_file}' )
shutil.copy(A__ , A__ )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
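
# Illustrative sketch (added; not part of the command): applying (pattern,
# replacement) pairs the way the conversion loop above applies TO_CONVERT.
# It reuses the `re` module already imported at the top of this file.
_DEMO_RULES = [(r"tfds\.core", r"datasets"), (r"tf\.io\.gfile\.GFile", r"open")]
_demo_line = "tfds.core.Version and tf.io.gfile.GFile"
for _pat, _rep in _DEMO_RULES:
    _demo_line = re.sub(_pat, _rep, _demo_line)
assert _demo_line == "datasets.Version and open"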
| 230 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_50, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_00, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_00, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=A__ , )
assert hasattr(self , """env""" )
def __A ( self , A__ ):
A__ : int = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
A__ : str = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=A__ , instance_count=A__ , instance_type=self.instance_type , debugger_hook_config=A__ , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=A__ , py_version="""py36""" , )
def __A ( self , A__ ):
TrainingJobAnalytics(A__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __A ( self , A__ ):
# create estimator
A__ : str = self.create_estimator(A__ )
# run training
estimator.fit()
# result dataframe
A__ : str = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
A__ : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
A__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
A__ : str = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , A__ )
| 456 | 0 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.auto.tqdm` that is disabled on all but the local main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
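
# Example usage (added for illustration): every rank may call this, but only the
# local main process renders the progress bar.
#
#     for batch in tqdm(True, range(100)):
#         ...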
| 377 |
'''simple docstring'''
import sys
from collections import defaultdict
class A :
def __init__( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = []
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] ) -> int:
"""simple docstring"""
return self.node_position[vertex]
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] ) -> Tuple:
"""simple docstring"""
_a = pos
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ) -> Any:
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
_a = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
_a = 2 * start + 1
else:
_a = 2 * start + 2
if heap[smallest_child] < heap[start]:
_a , _a = heap[smallest_child], positions[smallest_child]
_a , _a = (
heap[start],
positions[start],
)
_a , _a = temp, tempa
_a = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , lowerCAmelCase_ )
self.top_to_bottom(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict ) -> Any:
"""simple docstring"""
_a = position[index]
while index != 0:
_a = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
_a = heap[parent]
_a = position[parent]
self.set_position(position[parent] , lowerCAmelCase_ )
else:
_a = val
_a = temp
self.set_position(lowerCAmelCase_ , lowerCAmelCase_ )
break
_a = parent
else:
_a = val
_a = temp
self.set_position(lowerCAmelCase_ , 0 )
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_a = len(lowerCAmelCase_ ) // 2 - 1
for i in range(lowerCAmelCase_ , -1 , -1 ):
self.top_to_bottom(lowerCAmelCase_ , lowerCAmelCase_ , len(lowerCAmelCase_ ) , lowerCAmelCase_ )
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int ) -> List[Any]:
"""simple docstring"""
_a = positions[0]
_a = sys.maxsize
self.top_to_bottom(lowerCAmelCase_ , 0 , len(lowerCAmelCase_ ) , lowerCAmelCase_ )
return temp
def snake_case_ (UpperCamelCase : Any ):
'''simple docstring'''
_a = Heap()
_a = [0] * len(UpperCamelCase )
_a = [-1] * len(UpperCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
_a = [] # Heap of Distance of vertices from their neighboring vertex
_a = []
for vertex in range(len(UpperCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase )
heap.node_position.append(UpperCamelCase )
_a = []
_a = 1
_a = sys.maxsize
for neighbor, distance in adjacency_list[0]:
_a = 0
_a = distance
heap.heapify(UpperCamelCase , UpperCamelCase )
for _ in range(1 , len(UpperCamelCase ) ):
_a = heap.delete_minimum(UpperCamelCase , UpperCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
_a = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase )]
):
_a = distance
heap.bottom_to_top(
UpperCamelCase , heap.get_position(UpperCamelCase ) , UpperCamelCase , UpperCamelCase )
_a = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_snake_case : List[str] = int(input('Enter number of edges: ').strip())
_snake_case : Union[str, Any] = defaultdict(list)
for _ in range(edges_number):
_snake_case : Tuple = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
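
# Compact reference sketch (added for illustration): Prim's algorithm with the
# standard-library heapq instead of the hand-rolled heap above. It accepts the
# same adjacency list of [neighbor, weight] pairs built in the __main__ block.
import heapq

def prim_heapq(adjacency, start=0):
    visited = {start}
    candidates = [(w, start, v) for v, w in adjacency[start]]
    heapq.heapify(candidates)
    tree = []
    while candidates and len(visited) < len(adjacency):
        w, u, v = heapq.heappop(candidates)
        if v not in visited:
            visited.add(v)
            tree.append((u, v))
            for nxt, nw in adjacency[v]:
                if nxt not in visited:
                    heapq.heappush(candidates, (nw, v, nxt))
    return tree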
| 377 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Approximate the area under ``fnc`` between ``x_start`` and ``x_end``."""
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximate small segments of curve as linear and solve
        # for trapezoidal area
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        area += abs(fxb + fxa) * (xb - xa) / 2
        # Increment step
        xa = xb
        fxa = fxb
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
while i <= 10_0000:
print(F"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
i *= 10
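
    # Quick check (added for illustration): the same routine on f(x) = x**2 over
    # [0, 1] approaches the exact value 1/3 as the step count grows.
    assert abs(trapezoidal_area(lambda x: x * x, 0, 1, 1_000) - 1 / 3) < 1e-5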
| 65 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 36 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class UpperCAmelCase_ :
'''simple docstring'''
_lowercase : Optional[int] = None
_lowercase : Optional[jnp.ndarray] = None
_lowercase : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def _lowercase ( cls ):
"""simple docstring"""
return cls()
@dataclass
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : jnp.ndarray
_lowercase : jnp.ndarray
_lowercase : KarrasVeSchedulerState
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _lowercase ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , _lowercase = 0.02 , _lowercase = 100 , _lowercase = 1.007 , _lowercase = 80 , _lowercase = 0.05 , _lowercase = 50 , ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
return KarrasVeSchedulerState.create()
def _lowercase ( self , _lowercase , _lowercase , _lowercase = () ):
"""simple docstring"""
_lowerCAmelCase = jnp.arange(0 , _lowercase )[::-1].copy()
_lowerCAmelCase = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
            num_inference_steps=_lowercase , schedule=jnp.array(_lowercase , dtype=jnp.float32 ) , timesteps=_lowercase , )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , ):
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
_lowerCAmelCase = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
_lowerCAmelCase = 0
# sample eps ~ N(0, S_noise^2 * I)
_lowerCAmelCase = random.split(_lowercase , num=1 )
_lowerCAmelCase = self.config.s_noise * random.normal(key=_lowercase , shape=sample.shape )
_lowerCAmelCase = sigma + gamma * sigma
_lowerCAmelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = True , ):
"""simple docstring"""
_lowerCAmelCase = sample_hat + sigma_hat * model_output
_lowerCAmelCase = (sample_hat - pred_original_sample) / sigma_hat
_lowerCAmelCase = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=_lowercase , derivative=_lowercase , state=_lowercase )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = True , ):
"""simple docstring"""
_lowerCAmelCase = sample_prev + sigma_prev * model_output
_lowerCAmelCase = (sample_prev - pred_original_sample) / sigma_prev
_lowerCAmelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=_lowercase , derivative=_lowercase , state=_lowercase )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
raise NotImplementedError()
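# Hedged usage sketch: `denoiser` below is a dummy stand-in for a trained model,
# and the schedule indexing follows the Karras et al. (2022) stochastic sampler
# as used by the pipelines built around this scheduler.
if __name__ == "__main__":
    scheduler = FlaxKarrasVeScheduler()
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=50)
    key = random.PRNGKey(0)
    sample = random.normal(key, (1, 3, 32, 32)) * scheduler.config.sigma_max

    def denoiser(x, sigma):  # dummy stand-in for a trained model
        return jnp.zeros_like(x)

    for t in state.timesteps:
        sigma = state.schedule[t]
        sigma_prev = state.schedule[t - 1] if t > 0 else 0.0
        sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
        sample = scheduler.step(state, denoiser(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat).prev_sample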
| 162 |
'''simple docstring'''
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
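    # Pigeonhole sort runs in O(n + value range) time and O(range) extra space,
    # so it only pays off when the key range is small. Quick sanity check:
    assert pigeon_sort([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]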
| 162 | 1 |
'''simple docstring'''
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
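# Spot checks (exponents follow from METRIC_CONVERSION above): 4 km -> 4000 m,
# 1 m -> 0.001 km; a trailing "s" in unit names is tolerated.
assert length_conversion(4, "kilometers", "meter") == 4000
assert length_conversion(1, "meter", "kilometer") == 0.001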
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 334 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    return compute_nums(1)[0]
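# Goldbach's other conjecture claims every odd composite is a prime plus twice a
# square; 9 = 7 + 2*1^2 passes, and the first (known) failure is 5777:
assert compute_nums(1) == [5777]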
if __name__ == "__main__":
    print(F'{solution() = }')
| 334 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        feature = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=feature)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
        print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
| 271 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
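# Hedged usage sketch: build a width-1.4 variant of the config, as used by the
# 1.4_224 checkpoint family listed in the archive map above.
if __name__ == "__main__":
    config = MobileNetV2Config(depth_multiplier=1.4)
    print(config.model_type, config.depth_multiplier)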
| 271 | 1 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])

    result = mst(adjacency)

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 197 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
'''simple docstring'''
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__( self ):
'''simple docstring'''
return pformat(self.adj_list )
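# Usage sketch; the printed dictionaries follow the insertion logic above.
if __name__ == "__main__":
    graph = GraphAdjacencyList[int](directed=False)
    graph.add_edge(0, 1).add_edge(1, 2).add_edge(2, 0)  # add_edge returns self
    print(graph)  # {0: [1, 2], 1: [0, 2], 2: [1, 0]}

    digraph = GraphAdjacencyList[str]()  # directed by default
    digraph.add_edge("a", "b")
    print(digraph)  # {'a': ['b'], 'b': []}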
| 629 | 0 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(items, max_cost, key_func):
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
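# Hedged demo: with a 25-unit weight budget and value-ordered greed, only the
# salad fits (Things.get_value works as the sort key since it takes self only).
_menu = build_menu(["burger", "pizza", "salad"], [80, 100, 30], [40, 60, 10])
assert greedy(_menu, 25.0, Things.get_value) == ([_menu[2]], 30.0)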
def test_greedy():
    pass
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 278 |
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 278 | 1 |
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
| 226 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 226 | 1 |
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
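    # Example invocation (script name and paths are placeholders, not shipped files):
    #   python convert_s2t_checkpoint.py --fairseq_path ./checkpoint.pt \
    #       --pytorch_dump_folder_path ./s2t-hf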
| 288 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
| 288 | 1 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")
class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # To reduce the number of attribute look-ups in the while loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")

        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
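    # FIFO check: items come out in insertion order even across put/get calls.
    q = QueueByTwoStacks([1, 2, 3])
    q.put(4)
    assert [q.get() for _ in range(4)] == [1, 2, 3, 4]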
| 301 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
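    # Known-answer check (RFC 1321 test vector): MD5 of the empty message.
    assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"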
| 301 | 1 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as nested lists)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_var_norm(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 718 |
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
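# At z = 0 with a flat geometry (densities summing to one), H(0) reduces to H0:
assert abs(hubble_parameter(68.3, 1e-4, 0.3, 0.6999, 0) - 68.3) < 1e-6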
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 68 | 0 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 175 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 483 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
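# Hedged usage sketch ("harmonai/maestro-150k" is one published Dance Diffusion
# checkpoint; any compatible unet+scheduler pair works):
# pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
# audios = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios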
| 718 |
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(F"{solution() = }")
| 632 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: list,
    process_1_expected_batch_sizes: list,
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def snake_case ( ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Dict = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
a_ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
a_ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def snake_case ( ) -> Any:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = create_accelerator(even_batches=a_ )
verify_dataloader_batch_sizes(
a_ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
a_ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def snake_case ( ) -> str:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = create_accelerator(even_batches=a_ )
UpperCamelCase_ : str = torch.nn.Linear(1 , 1 )
UpperCamelCase_ : Union[str, Any] = accelerator.prepare(a_ )
UpperCamelCase_ : List[Any] = create_dataloader(a_ , dataset_size=3 , batch_size=1 )
UpperCamelCase_ : Optional[int] = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(a_ ):
UpperCamelCase_ : Optional[Any] = ddp_model(batch[0].float() )
UpperCamelCase_ : Optional[int] = output.sum()
loss.backward()
batch_idxs.append(a_ )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def snake_case ( a_ : Tuple ) -> List[Any]:
"""simple docstring"""
with warnings.catch_warnings(record=a_ ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , a_ )
assert "only supported for multi-GPU" in str(w[-1].message )
def snake_case ( ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = True
UpperCamelCase_ : List[Any] = False
UpperCamelCase_ : Any = create_accelerator(even_batches=a_ )
UpperCamelCase_ : Any = torch.nn.Linear(1 , 1 )
UpperCamelCase_ : Tuple = accelerator.prepare(a_ )
UpperCamelCase_ : Tuple = create_dataloader(a_ , dataset_size=3 , batch_size=1 )
UpperCamelCase_ : int = create_dataloader(a_ , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a_ ):
UpperCamelCase_ : Union[str, Any] = train_dl.batch_sampler.even_batches
UpperCamelCase_ : Optional[Any] = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def snake_case ( ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = True
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : Tuple = create_accelerator(even_batches=a_ )
UpperCamelCase_ : str = torch.nn.Linear(1 , 1 )
UpperCamelCase_ : Dict = accelerator.prepare(a_ )
create_dataloader(a_ , dataset_size=3 , batch_size=1 , iterable=a_ )
UpperCamelCase_ : Tuple = create_dataloader(a_ , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("""ignore""" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a_ ):
UpperCamelCase_ : List[str] = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def snake_case ( ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : int = create_accelerator()
UpperCamelCase_ : Optional[Any] = torch.nn.Linear(1 , 1 )
UpperCamelCase_ : str = accelerator.prepare(a_ )
create_dataloader(a_ , dataset_size=3 , batch_size=1 , iterable=a_ )
with warnings.catch_warnings(record=a_ ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a_ ):
pass
assert issubclass(w[-1].category , a_ )
assert "only supported for map-style datasets" in str(w[-1].message )
def snake_case ( ) -> Any:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = create_accelerator()
accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" )
test_default_ensures_even_batch_sizes()
accelerator.print("""Run tests with even_batches disabled""" )
test_can_disable_even_batches()
accelerator.print("""Test joining uneven inputs""" )
test_can_join_uneven_inputs()
accelerator.print("""Test overriding even_batches when joining uneven inputs""" )
test_join_can_override_even_batches()
accelerator.print("""Test overriding even_batches for mixed dataloader types""" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("""Test join with non DDP distributed raises warning""" )
UpperCamelCase_ : Union[str, Any] = accelerator.state.distributed_type
UpperCamelCase_ : List[str] = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(a_ )
UpperCamelCase_ : Dict = original_state
if __name__ == "__main__":
main()
| 208 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Solve for whichever of voltage, current or power is passed as 0."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    import doctest

    doctest.testmod()
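# --- illustrative usage (added; not part of the original module) ---
# Pass 0 for exactly one argument to solve for that quantity.
assert electric_power(voltage=0, current=2, power=5) == ("voltage", 2.5)
assert electric_power(voltage=2.2, current=2.2, power=0) == ("power", 4.84)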
| 208 | 1 |
"""simple docstring"""
def solution(n: int = 4_000_000) -> int:
    """Return the sum of all even Fibonacci numbers not exceeding n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
    print(f"{solution() = }")
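# --- illustrative check (added): the even Fibonacci terms up to 10 are 0, 2 and 8 ---
assert solution(10) == 10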
| 711 | """simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
a =1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __UpperCAmelCase :
def __init__( self , _lowerCamelCase , _lowerCamelCase=16 , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=14 , _lowerCamelCase=10 , _lowerCamelCase=19 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=True , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=[1, 2, 3, 4, 5] , _lowerCamelCase=25 , _lowerCamelCase=5 , ):
lowerCamelCase__ =d_model
lowerCamelCase__ =parent
lowerCamelCase__ =batch_size
lowerCamelCase__ =prediction_length
lowerCamelCase__ =context_length
lowerCamelCase__ =cardinality
lowerCamelCase__ =num_time_features
lowerCamelCase__ =lags_sequence
lowerCamelCase__ =embedding_dimension
lowerCamelCase__ =is_training
lowerCamelCase__ =hidden_size
lowerCamelCase__ =num_hidden_layers
lowerCamelCase__ =num_attention_heads
lowerCamelCase__ =intermediate_size
lowerCamelCase__ =hidden_act
lowerCamelCase__ =hidden_dropout_prob
lowerCamelCase__ =attention_probs_dropout_prob
lowerCamelCase__ =context_length
lowerCamelCase__ =prediction_length + label_length
lowerCamelCase__ =label_length
lowerCamelCase__ =moving_average
lowerCamelCase__ =autocorrelation_factor
def _a ( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def _a ( self , _lowerCamelCase ):
lowerCamelCase__ =config.context_length + max(config.lags_sequence )
lowerCamelCase__ =ids_tensor([self.batch_size, 1] , config.cardinality[0] )
lowerCamelCase__ =floats_tensor([self.batch_size, _past_length, config.num_time_features] )
lowerCamelCase__ =floats_tensor([self.batch_size, _past_length] )
lowerCamelCase__ =floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
lowerCamelCase__ =floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
lowerCamelCase__ =floats_tensor([self.batch_size, config.prediction_length] )
lowerCamelCase__ ={
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def _a ( self ):
lowerCamelCase__ =self.get_config()
lowerCamelCase__ =self.prepare_autoformer_inputs_dict(_lowerCamelCase )
return config, inputs_dict
def _a ( self ):
lowerCamelCase__ , lowerCamelCase__ =self.prepare_config_and_inputs()
return config, inputs_dict
def _a ( self , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ =AutoformerModel(config=_lowerCamelCase ).to(_lowerCamelCase ).eval()
lowerCamelCase__ =model(**_lowerCamelCase )
lowerCamelCase__ =outputs.encoder_last_hidden_state
lowerCamelCase__ =outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ =model.get_encoder()
encoder.save_pretrained(_lowerCamelCase )
lowerCamelCase__ =AutoformerEncoder.from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ =model.create_network_inputs(**_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ =model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
lowerCamelCase__ =torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
lowerCamelCase__ =encoder(inputs_embeds=_lowerCamelCase )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
lowerCamelCase__ =(
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
lowerCamelCase__ =torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
lowerCamelCase__ =torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
lowerCamelCase__ =torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ =model.get_decoder()
decoder.save_pretrained(_lowerCamelCase )
lowerCamelCase__ =AutoformerDecoder.from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
lowerCamelCase__ =decoder(
trend=_lowerCamelCase , inputs_embeds=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __UpperCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
A__ : Any = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
A__ : Union[str, Any] = (AutoformerForPrediction,) if is_torch_available() else ()
A__ : List[Any] = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
A__ : Optional[Any] = False
A__ : Optional[Any] = False
A__ : Any = False
A__ : List[Any] = False
A__ : Union[str, Any] = False
A__ : Tuple = False
def _a ( self ):
lowerCamelCase__ =AutoformerModelTester(self )
lowerCamelCase__ =ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def _a ( self ):
self.config_tester.run_common_tests()
def _a ( self ):
lowerCamelCase__ , lowerCamelCase__ =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCamelCase__ =model_class(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ =model_class.from_pretrained(_lowerCamelCase , output_loading_info=_lowerCamelCase )
self.assertEqual(info["missing_keys"] , [] )
def _a ( self ):
lowerCamelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_lowerCamelCase )
@unittest.skip(reason="Model has no tokens embeddings" )
def _a ( self ):
pass
def _a ( self ):
lowerCamelCase__ =inspect.signature(getattr(_lowerCamelCase , "forward" ) )
# The main input is the name of the argument after `self`
lowerCamelCase__ =list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , _lowerCamelCase )
def _a ( self ):
lowerCamelCase__ , lowerCamelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ =model_class(_lowerCamelCase )
lowerCamelCase__ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ =[*signature.parameters.keys()]
lowerCamelCase__ =[
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(_lowerCamelCase )] , _lowerCamelCase )
def _a ( self ):
lowerCamelCase__ , lowerCamelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ =True
lowerCamelCase__ =getattr(self.model_tester , "seq_length" , _lowerCamelCase )
lowerCamelCase__ =getattr(self.model_tester , "decoder_seq_length" , _lowerCamelCase )
lowerCamelCase__ =getattr(self.model_tester , "encoder_seq_length" , _lowerCamelCase )
lowerCamelCase__ =getattr(self.model_tester , "d_model" , _lowerCamelCase )
lowerCamelCase__ =getattr(self.model_tester , "num_attention_heads" , _lowerCamelCase )
lowerCamelCase__ =d_model // num_attention_heads
for model_class in self.all_model_classes:
lowerCamelCase__ =True
lowerCamelCase__ =False
lowerCamelCase__ =True
lowerCamelCase__ =model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ =model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
lowerCamelCase__ =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase__ =True
lowerCamelCase__ =model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ =model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
lowerCamelCase__ =outputs.encoder_attentions
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
lowerCamelCase__ =len(_lowerCamelCase )
lowerCamelCase__ =7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
# decoder attentions
lowerCamelCase__ =outputs.decoder_attentions
self.assertIsInstance(_lowerCamelCase , (list, tuple) )
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
lowerCamelCase__ =outputs.cross_attentions
self.assertIsInstance(_lowerCamelCase , (list, tuple) )
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ =model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
self.assertEqual(out_len + 2 , len(_lowerCamelCase ) )
lowerCamelCase__ =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def _a ( self ):
super().test_retain_grad_hidden_states_attentions()
def lowerCamelCase_ ( __lowerCAmelCase="train-batch.pt" ) -> Tuple:
'''simple docstring'''
lowerCamelCase__ =hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=__lowerCAmelCase , repo_type="dataset" )
lowerCamelCase__ =torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase )
return batch
@require_torch
@slow
class __UpperCAmelCase ( unittest.TestCase ):
def _a ( self ):
lowerCamelCase__ =AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(_lowerCamelCase )
lowerCamelCase__ =prepare_batch()
with torch.no_grad():
lowerCamelCase__ =model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
lowerCamelCase__ =torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , _lowerCamelCase )
lowerCamelCase__ =torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=_lowerCamelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def _a ( self ):
lowerCamelCase__ =AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(_lowerCamelCase )
lowerCamelCase__ =prepare_batch("val-batch.pt" )
with torch.no_grad():
lowerCamelCase__ =model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
lowerCamelCase__ =torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , _lowerCamelCase )
lowerCamelCase__ =torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=_lowerCamelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def _a ( self ):
lowerCamelCase__ =AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(_lowerCamelCase )
lowerCamelCase__ =prepare_batch("val-batch.pt" )
with torch.no_grad():
lowerCamelCase__ =model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
lowerCamelCase__ =torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , _lowerCamelCase )
lowerCamelCase__ =torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=_lowerCamelCase )
lowerCamelCase__ =outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _lowerCamelCase , rtol=1E-1 ) )
| 132 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed")
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 570 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 570 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
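# --- behavior sketch (added; not part of the original stub) ---
# With the `onnx` backend missing, any use of the placeholder fails fast inside
# requires_backends, e.g. (hypothetical calls, both raise an ImportError that
# explains how to install the backend):
#   OnnxRuntimeModel()
#   OnnxRuntimeModel.from_pretrained("some/onnx-repo")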
| 487 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
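# --- hypothetical usage sketch (added; `video_frames` and `waveform` are assumed inputs) ---
# A single call can mix both modalities; the image processor produces the pixel
# inputs, the feature extractor the audio inputs, and the results are merged:
#   processor = TvltProcessor(image_processor, feature_extractor)
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)
#   # `batch` now holds the union of image and audio model inputs.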
| 487 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_lowercase = r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `\" / \"`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `\" // \"`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `\"train\"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `\"compressed\"`)
The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and
`\"compressed\"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None,
        eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5,
        max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr",
        dataset_split="train", index_name="compressed", index_path=None, passages_path=None,
        use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True,
        exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True,
        forced_eos_token_id=None, **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
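# --- construction sketch (added; the model identifiers below are illustrative) ---
# A composite RAG config is assembled from a question-encoder config and a
# generator config via the classmethod above, e.g.:
#   from transformers import AutoConfig, RagConfig
#   question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   generator = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator)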
| 356 |
'''simple docstring'''
def hamming(n_element: int) -> list:
    """Return the first n_element terms of the Hamming number series (2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
| 356 | 1 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30_522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
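# --- illustrative aggregation (added; the token ids below are made up) ---
# Counter.update treats each sequence as an iterable of ids and accumulates
# per-id occurrence counts across all sequences, exactly as in the loop above.
_demo_counter = Counter()
for _seq in ([5, 7, 5], [7, 9]):
    _demo_counter.update(_seq)
assert _demo_counter[5] == 2 and _demo_counter[7] == 2 and _demo_counter[9] == 1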
| 709 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
SCREAMING_SNAKE_CASE__ = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
SCREAMING_SNAKE_CASE__ = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
SCREAMING_SNAKE_CASE__ = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def _snake_case ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
def _snake_case ( self , lowercase , lowercase ) -> Any:
lowerCAmelCase = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
lowerCAmelCase = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
lowerCAmelCase = evaluate(dataset=lowercase , predictions=lowercase )
return score
| 393 | 0 |
def odd_even_transposition(arr: list) -> list:
    """Brick sort: alternately compare-and-swap even- and odd-indexed adjacent pairs."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
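# --- illustrative check (added): alternating odd/even phases sort the list ---
assert odd_even_transposition([3, 1, 2]) == [1, 2, 3]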
| 641 | '''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert("RGB" )
UpperCAmelCase_ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ).to(snake_case_ )
return image
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if "visual_encoder" in key:
UpperCAmelCase_ = re.sub("visual_encoder*" , "vision_model.encoder" , snake_case_ )
if "blocks" in key:
UpperCAmelCase_ = re.sub(R"blocks" , "layers" , snake_case_ )
if "attn" in key:
UpperCAmelCase_ = re.sub(R"attn" , "self_attn" , snake_case_ )
if "norm1" in key:
UpperCAmelCase_ = re.sub(R"norm1" , "layer_norm1" , snake_case_ )
if "norm2" in key:
UpperCAmelCase_ = re.sub(R"norm2" , "layer_norm2" , snake_case_ )
if "encoder.norm" in key:
UpperCAmelCase_ = re.sub(R"encoder.norm" , "post_layernorm" , snake_case_ )
if "encoder.patch_embed.proj" in key:
UpperCAmelCase_ = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , snake_case_ )
if "encoder.pos_embed" in key:
UpperCAmelCase_ = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , snake_case_ )
if "encoder.cls_token" in key:
UpperCAmelCase_ = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , snake_case_ )
if "self_attn" in key:
UpperCAmelCase_ = re.sub(R"self_attn.proj" , "self_attn.projection" , snake_case_ )
return key
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Any=None ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
UpperCAmelCase_ = BlipConfig.from_pretrained(snake_case_ )
else:
UpperCAmelCase_ = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
UpperCAmelCase_ = BlipForConditionalGeneration(snake_case_ ).eval()
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
UpperCAmelCase_ = blip_decoder(pretrained=snake_case_ , image_size=3_84 , vit="base" )
UpperCAmelCase_ = pt_model.eval()
UpperCAmelCase_ = pt_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
hf_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = 3_84
UpperCAmelCase_ = load_demo_image(image_size=snake_case_ , device="cpu" )
UpperCAmelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" )
UpperCAmelCase_ = tokenizer(["a picture of"] ).input_ids
UpperCAmelCase_ = hf_model.generate(snake_case_ , snake_case_ )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
UpperCAmelCase_ = hf_model.generate(snake_case_ )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(snake_case_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
UpperCAmelCase_ = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
UpperCAmelCase_ = blip_vqa(pretrained=snake_case_ , image_size=snake_case_ , vit="base" )
vqa_model.eval()
UpperCAmelCase_ = vqa_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = BlipForQuestionAnswering(snake_case_ )
hf_vqa_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = ["How many dogs are in this image?"]
UpperCAmelCase_ = tokenizer(snake_case_ , return_tensors="pt" ).input_ids
UpperCAmelCase_ = hf_vqa_model.generate(snake_case_ , snake_case_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
UpperCAmelCase_ = blip_itm(pretrained=snake_case_ , image_size=snake_case_ , vit="base" )
itm_model.eval()
UpperCAmelCase_ = itm_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = BlipForImageTextRetrieval(snake_case_ )
UpperCAmelCase_ = ["A picture of a woman with a dog sitting in a beach"]
UpperCAmelCase_ = tokenizer(
snake_case_ , return_tensors="pt" , padding="max_length" , truncation=snake_case_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(snake_case_ )
hf_itm_model.eval()
UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ )
UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[Any] =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
SCREAMING_SNAKE_CASE_: int =parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 78 | 0 |
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def _a ( ):
"""simple docstring"""
snake_case__ : str = argparse.ArgumentParser()
parser.add_argument(
'''-m''' , '''--pretrained_model_name_or_path''' , type=__lowerCAmelCase , default=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
parser.add_argument(
'''-c''' , '''--caption''' , type=__lowerCAmelCase , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
parser.add_argument(
'''-n''' , '''--images_num''' , type=__lowerCAmelCase , default=4 , help='''How much images to generate.''' , )
parser.add_argument(
'''-s''' , '''--seed''' , type=__lowerCAmelCase , default=42 , help='''Seed for random process.''' , )
parser.add_argument(
'''-ci''' , '''--cuda_id''' , type=__lowerCAmelCase , default=0 , help='''cuda_id.''' , )
snake_case__ : Union[str, Any] = parser.parse_args()
return args
def _a ( __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if not len(__lowerCAmelCase ) == rows * cols:
raise ValueError('''The specified number of rows and columns are not correct.''' )
snake_case__ , snake_case__ : List[Any] = imgs[0].size
snake_case__ : Union[str, Any] = Image.new('''RGB''' , size=(cols * w, rows * h) )
snake_case__ , snake_case__ : str = grid.size
for i, img in enumerate(__lowerCAmelCase ):
grid.paste(__lowerCAmelCase , box=(i % cols * w, i // cols * h) )
return grid
def _a ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any="robotic cat with wings" , __lowerCAmelCase : Dict=7.5 , __lowerCAmelCase : List[Any]=50 , __lowerCAmelCase : Tuple=1 , __lowerCAmelCase : List[str]=42 , ):
"""simple docstring"""
snake_case__ : List[str] = torch.Generator(pipeline.device ).manual_seed(__lowerCAmelCase )
snake_case__ : int = pipeline(
__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , generator=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , ).images
snake_case__ : Union[str, Any] = int(math.sqrt(__lowerCAmelCase ) )
snake_case__ : Optional[Any] = image_grid(__lowerCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, """unet""", unet)
else:
    unet = unet.to(torch.device("""cuda""", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
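As a quick sanity check of the row/column arithmetic in image_grid above, the following standalone sketch (assuming only Pillow is installed; the tile size and colors are illustrative, not taken from the script) pastes four solid tiles into a 2x2 grid:

# Minimal sketch of the i % cols / i // cols indexing used by image_grid.
from PIL import Image

tiles = [Image.new("RGB", (64, 64), color) for color in ("red", "green", "blue", "white")]
rows, cols = 2, 2
w, h = tiles[0].size
grid = Image.new("RGB", size=(cols * w, rows * h))
for i, tile in enumerate(tiles):
    grid.paste(tile, box=(i % cols * w, i // cols * h))  # i % cols picks the column, i // cols the row
assert grid.size == (128, 128)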
| 502 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class a ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
    model_class = FlaxAutoencoderKL
@property
    def dummy_input( self ):
'''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (3_2, 3_2)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common( self ):
'''simple docstring'''
        init_dict = {
'''block_out_channels''': [3_2, 6_4],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
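As a standalone illustration of what the dummy input above evaluates to (assuming jax is available), the sample tensor has shape (batch, channels, height, width):

import jax

prng_key = jax.random.PRNGKey(0)
sample = jax.random.uniform(prng_key, (4, 3) + (32, 32))  # mirrors batch_size=4, num_channels=3, sizes=(32, 32)
assert sample.shape == (4, 3, 32, 32)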
| 502 | 1 |
from itertools import permutations
def is_substring_divisible( num ):
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
    tests = [7, 1_1, 1_3, 1_7]
    for i, test in enumerate(tests ):
if (num[i + 4] * 1_0_0 + num[i + 5] * 1_0 + num[i + 6]) % test != 0:
return False
return True
def solution( n = 1_0 ):
"""simple docstring"""
    return sum(
        int(''''''.join(map(str, num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 0 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = """sshleifer/student_marian_en_ro_6_1"""
MBART_TINY = """sshleifer/tiny-mbart"""
@require_torch
class lowerCamelCase_ ( TestCasePlus ):
    def run_seqaseq_quick ( self , distributed=False , extra_args_str=None , predict_with_generate=True , do_train=True , do_eval=True , do_predict=True , ):
        """simple docstring"""
        output_dir = self.run_trainer(
            eval_steps=1 , max_len=1_2 , model_name=MARIAN_MODEL , num_train_epochs=1 , distributed=distributed , extra_args_str=extra_args_str , predict_with_generate=predict_with_generate , do_train=do_train , do_eval=do_eval , do_predict=do_predict , )
        logs = TrainerState.load_from_json(os.path.join(output_dir , '''trainer_state.json''' ) ).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if '''eval_loss''' in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
        last_step_stats = eval_metrics[-1]
        assert isinstance(last_step_stats['''eval_bleu'''] , float )
        assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
        self.run_seqaseq_quick(distributed=False )
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
        self.run_seqaseq_quick(distributed=True )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=False )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
        self.run_seqaseq_quick(
            distributed=True , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=False )
@require_apex
@require_torch_gpu
def A ( self ):
"""simple docstring"""
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica ( self , experiment_id ):
        """simple docstring"""
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
        }
        data = experiments[experiment_id]
        kwargs = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
        log_info_string = '''Running training'''
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs , extra_args_str=data['''extra_args_str'''] )
        n_matches = len(re.findall(log_info_string , cl.err ) )
        self.assertEqual(n_matches , data['''n_matches'''] )
@slow
def A ( self ):
"""simple docstring"""
        output_dir = self.run_trainer(
            eval_steps=2 , max_len=1_2_8 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=False , )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir , '''trainer_state.json''' ) ).log_history
        eval_metrics = [log for log in logs if '''eval_loss''' in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['''eval_bleu'''] , float )
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir )
        contents = {os.path.basename(p ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def A ( self ):
"""simple docstring"""
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim ) -> Tuple[int, float]:
            extra_args = '''--skip_memory_metrics 0'''
            output_dir = self.run_trainer(
                max_len=1_2_8 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , '''trainer_state.json''' ) ).log_history
            gpu_peak_mem_mb = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 )
            gpu_alloc_mem_mb = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 )
            loss = logs[0]['''train_loss''']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig , gpu_alloc_mem_orig , loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb , gpu_alloc_mem_bnb , loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 1_2_0
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
            F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
            F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
            F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
            F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
        self.assertEqual(
            loss_orig , loss_bnb , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
    def run_trainer ( self , max_len , model_name , num_train_epochs , learning_rate = 3E-3 , optim = "adafactor" , distributed = False , extra_args_str = None , eval_steps = 0 , predict_with_generate = True , do_train = True , do_eval = True , do_predict = True , n_gpus_to_use = None , ):
        """simple docstring"""
        data_dir = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = F'''
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs )}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps )}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        '''.split()
        args_eval = F'''
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps )}
        '''.split()
        args_predict = '''
            --do_predict
        '''.split()
        args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = F'''
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            '''.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env() )
        else:
            testargs = ['''run_translation.py'''] + args
            with patch.object(sys , '''argv''' , testargs ):
                main()
return output_dir
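run_trainer above drives the example script by assembling a flat argv list and either spawning torch.distributed.run or patching sys.argv before calling the example's main(). A minimal standalone sketch of the non-distributed path, with a hypothetical toy_main standing in for run_translation.main:

import argparse
import sys
from unittest.mock import patch

def toy_main():  # hypothetical stand-in for run_translation.main
    parser = argparse.ArgumentParser()
    parser.add_argument("--learning_rate", type=float)
    return parser.parse_args().learning_rate  # argparse reads the patched sys.argv

testargs = ["toy_script.py"] + "--learning_rate 3e-4".split()
with patch.object(sys, "argv", testargs):
    assert toy_main() == 3e-4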
| 0 | 1 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class a__ ( lowerCAmelCase__ ):
    def __init__( self , sql , con , features = None , cache_dir = None , keep_in_memory = False , **kwargs , ):
        super().__init__(features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , **kwargs )
        self.builder = Sql(
            cache_dir=cache_dir , features=features , sql=sql , con=con , **kwargs , )
    def read( self ):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train" , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class a__ :
    def __init__( self , dataset , name , con , batch_size = None , num_proc = None , **to_sql_kwargs , ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write( self ) -> int:
        _ = self.to_sql_kwargs.pop("sql" , None )
        _ = self.to_sql_kwargs.pop("con" , None )
        index = self.to_sql_kwargs.pop("index" , False )
        written = self._write(index=index , **self.to_sql_kwargs )
        return written
    def _batch_sql( self , args ):
        offset , index , to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name , self.con , index=index , **to_sql_kwargs )
        return num_rows or len(df )
    def _write( self , index , **to_sql_kwargs ) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
                written += self._batch_sql((offset, index, to_sql_kwargs) )
        else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
                    written += num_rows
return written
| 205 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case_ : str ={
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict =['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[Any] =[
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[str] =[
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
snake_case_ : List[str] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
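The _LazyModule above defers the heavy framework imports until an attribute is first accessed; a minimal sketch of the same idea using a module-level __getattr__ (PEP 562), with json standing in for a heavy dependency:

import importlib

_import_structure = {"json": ["dumps"]}

def __getattr__(name):
    # resolve names lazily on first access instead of at import time
    for module_name, attrs in _import_structure.items():
        if name in attrs:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)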
| 205 | 1 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int) -> int:
'''simple docstring'''
    factors = prime_factors(_lowerCamelCase)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod() | 557 |
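A self-contained sketch of the same Möbius computation, inlining trial-division factorization so it runs without the maths package imports (values checked against the classical table):

def mobius(n: int) -> int:
    factors = []
    d, m = 2, n
    while d * d <= m:
        while m % d == 0:
            factors.append(d)
            m //= d
        d += 1
    if m > 1:
        factors.append(m)
    if len(set(factors)) != len(factors):  # a repeated prime factor means n is not square-free
        return 0
    return -1 if len(factors) % 2 else 1

assert [mobius(n) for n in (1, 2, 3, 4, 6)] == [1, -1, -1, 0, 1]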
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip | 557 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
'''simple docstring'''
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
@dataclass
class DataTrainingArguments:
'''simple docstring'''
    train_file: Optional[str] = field(default=None , metadata={"help": "The input training data file (a text file)."} )
    validation_file: Optional[str] = field(
        default=None , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    def __post_init__( self ):
        if self.train_file is not None:
            extension = self.train_file.split(""".""" )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(""".""" )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
'''simple docstring'''
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__( self , features ):
        label_name = """label""" if """label""" in features[0].keys() else """labels"""
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]["""input_ids"""] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch["""labels"""] = torch.tensor(labels , dtype=torch.int64 )
        return batch
def main():
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_swag""" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["""train"""] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["""validation"""] = data_args.validation_file
        extension = data_args.train_file.split(""".""" )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            """swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f'ending{i}' for i in range(4 )]
    context_name = """sent1"""
    question_header_name = """sent2"""
if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
            max_seq_length = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding="""max_length""" if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("""--do_train requires a train dataset""" )
        train_dataset = raw_datasets["""train"""]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("""--do_eval requires a validation dataset""" )
        eval_dataset = raw_datasets["""validation"""]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )
    # Metric
    def compute_metrics(eval_predictions ):
        predictions , label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["""train_samples"""] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics("""train""" , metrics )
        trainer.save_metrics("""train""" , metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["""eval_samples"""] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
    kwargs = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """multiple-choice""",
        """dataset_tags""": """swag""",
        """dataset_args""": """regular""",
        """dataset""": """SWAG""",
        """language""": """en""",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn( index ):
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 344 |
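The collator in the script above flattens batch x choices examples into one tokenizer.pad call and then restores the choice dimension with view; a minimal torch sketch of that reshape, with made-up sizes:

import torch

batch_size, num_choices, seq_len = 2, 4, 5
flat = torch.arange(batch_size * num_choices * seq_len)  # padding happens on this flat view
unflat = flat.view(batch_size, num_choices, -1)          # restore (batch, choices, seq)
assert unflat.shape == (2, 4, 5)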
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCAmelCase = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeq2SeqLM,
    'translation': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class BaseTransformer( pl.LightningModule ):
'''simple docstring'''
    def __init__( self , hparams , num_labels=None , mode="base" , config=None , tokenizer=None , model=None , **config_kwargs , ):
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams )
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir )
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=cache_dir , **config_kwargs , )
        else:
            self.config = config
        extra_model_params = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
        for p in extra_model_params:
            if getattr(self.hparams , p , None ):
                assert hasattr(self.config , p ), f'model config doesn\'t have a `{p}` attribute'
                setattr(self.config , p , getattr(self.hparams , p ) )
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=cache_dir , )
        else:
            self.tokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=cache_dir , )
        else:
            self.model = model
    def load_hf_checkpoint( self , *args , **kwargs ):
        self.model = self.model_type.from_pretrained(*args , **kwargs )
    def get_lr_scheduler( self ):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        scheduler = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1}
        return scheduler
    def configure_optimizers( self ):
        model = self.model
        no_decay = ["""bias""", """LayerNorm.weight"""]
        optimizer_grouped_parameters = [
            {
                """params""": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check these named parameters
                """weight_decay""": self.hparams.weight_decay,
            },
            {
                """params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                """weight_decay""": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , scale_parameter=False , relative_step=False )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step( self , batch , batch_nb ):
        return self.validation_step(batch , batch_nb )
    def test_epoch_end( self , outputs ):
        return self.validation_end(outputs )
    def total_steps( self ) -> int:
        num_devices = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
    def setup( self , stage ):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset )
        else:
            self.train_loader = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=True )
            self.dataset_size = len(self.train_dataloader().dataset )
    def get_dataloader( self , type_path , batch_size , shuffle = False ):
        raise NotImplementedError("""You must implement this for your task""" )
    def train_dataloader( self ):
        return self.train_loader
    def val_dataloader( self ):
        return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=False )
    def test_dataloader( self ):
        return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=False )
    def _feature_file( self , mode ):
        return os.path.join(
            self.hparams.data_dir , """cached_{}_{}_{}""".format(
                mode , list(filter(None , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
    def on_save_checkpoint( self , checkpoint ) -> None:
        save_path = self.output_dir.joinpath("""best_tfmr""" )
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path )
        self.tokenizer.save_pretrained(save_path )
@staticmethod
    def add_model_specific_args( parser , root_dir ):
        parser.add_argument(
            """--model_name_or_path""" , default=None , type=str , required=True , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
        parser.add_argument(
            """--config_name""" , default="""""" , type=str , help="""Pretrained config name or path if not the same as model_name""" )
        parser.add_argument(
            """--tokenizer_name""" , default=None , type=str , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
        parser.add_argument(
            """--cache_dir""" , default=str(Path(root_dir ).parent / """test_run""" / """cache""" ) , type=str , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , )
        parser.add_argument(
            """--encoder_layerdrop""" , type=float , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , )
        parser.add_argument(
            """--decoder_layerdrop""" , type=float , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , )
        parser.add_argument(
            """--dropout""" , type=float , help="""Dropout probability (Optional). Goes into model.config""" , )
        parser.add_argument(
            """--attention_dropout""" , type=float , help="""Attention dropout probability (Optional). Goes into model.config""" , )
        parser.add_argument("""--learning_rate""" , default=5e-5 , type=float , help="""The initial learning rate for Adam.""" )
        parser.add_argument(
            """--lr_scheduler""" , default="""linear""" , choices=arg_to_scheduler_choices , metavar=arg_to_scheduler_metavar , type=str , help="""Learning rate scheduler""" , )
        parser.add_argument("""--weight_decay""" , default=0.0 , type=float , help="""Weight decay if we apply some.""" )
        parser.add_argument("""--adam_epsilon""" , default=1e-8 , type=float , help="""Epsilon for Adam optimizer.""" )
        parser.add_argument("""--warmup_steps""" , default=0 , type=int , help="""Linear warmup over warmup_steps.""" )
        parser.add_argument("""--num_workers""" , default=4 , type=int , help="""kwarg passed to DataLoader""" )
        parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=int )
        parser.add_argument("""--train_batch_size""" , default=32 , type=int )
        parser.add_argument("""--eval_batch_size""" , default=32 , type=int )
        parser.add_argument("""--adafactor""" , action="""store_true""" )
class InitCallback( pl.Callback ):
    '''simple docstring'''
    def on_sanity_check_start( self , trainer , pl_module ):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback( pl.Callback ):
    '''simple docstring'''
    def on_after_backward( self , trainer , pl_module ):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name )
class LoggingCallback( pl.Callback ):
    '''simple docstring'''
    def on_batch_end( self , trainer , pl_module ):
        lr_scheduler = trainer.lr_schedulers[0]["""scheduler"""]
        lrs = {f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )
    def on_validation_end( self , trainer , pl_module ):
        rank_zero_info("""***** Validation results *****""" )
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("""{} = {}\n""".format(key , str(metrics[key] ) ) )
    def on_test_end( self , trainer , pl_module ):
        rank_zero_info("""***** Test results *****""" )
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" )
        with open(output_test_results_file , """w""" ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("""{} = {}\n""".format(key , str(metrics[key] ) ) )
                    writer.write("""{} = {}\n""".format(key , str(metrics[key] ) ) )
def add_generic_args( parser , root_dir ) -> None:
    """simple docstring"""
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        """--output_dir""" , default=str(Path(root_dir ).parent / """test_run""" / """model_checkpoints""" ) , type=str , help="""The output directory where the model predictions and checkpoints will be written.""" , )
    parser.add_argument(
        """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
    parser.add_argument(
        """--fp16_opt_level""" , type=str , default="""O2""" , help=(
            """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
            """See details at https://nvidia.github.io/apex/amp.html"""
        ) , )
    parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=int )
    parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=float , help="""Max gradient norm""" )
    parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
    parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" )
    parser.add_argument(
        """--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=int , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
    parser.add_argument("""--seed""" , type=int , default=42 , help="""random seed for initialization""" )
    parser.add_argument(
        """--data_dir""" , default=str(Path(root_dir ).parent / """test_run""" / """dummy-train-data""" ) , type=str , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , )
def generic_train( model: BaseTransformer , args: argparse.Namespace , early_stopping_callback=None , logger=True , extra_callbacks=[] , checkpoint_callback=None , logging_callback=None , **extra_train_kwargs , ):
    """simple docstring"""
    pl.seed_everything(args.seed )
    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params["""precision"""] = 16
    if args.gpus > 1:
        train_params["""accelerator"""] = """auto"""
        train_params["""strategy"""] = """ddp"""
    train_params["""accumulate_grad_batches"""] = args.accumulate_grad_batches
    train_params["""profiler"""] = None
    train_params["""devices"""] = """auto"""
    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
    if args.do_train:
        trainer.fit(model )
    else:
        print("""RAG modeling tests with new set functions successfully executed!""" )
return trainer | 344 | 1 |
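configure_optimizers above keeps biases and LayerNorm weights out of weight decay; a standalone sketch of that grouping with a toy torch module (the module and hyperparameters here are illustrative, not taken from the file):

import torch
from torch import nn

class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)

model = Toy()
no_decay = ("bias", "LayerNorm.weight")
grouped = [
    {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
     "weight_decay": 0.01},
    {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
     "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(grouped, lr=5e-5)
assert len(grouped[0]["params"]) == 1  # only linear.weight is decayed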
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __UpperCAmelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = tempfile.mkdtemp()
lowerCAmelCase_ = SamImageProcessor()
lowerCAmelCase_ = SamProcessor(_UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self , **_lowerCamelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase ).image_processor
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase_ = [Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0 )
lowerCAmelCase_ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCamelCase )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = SamProcessor(image_processor=_UpperCamelCase )
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = image_processor(_UpperCamelCase , return_tensors='''np''' )
lowerCAmelCase_ = processor(images=_UpperCamelCase , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''' )  # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = SamProcessor(image_processor=_UpperCamelCase )
lowerCAmelCase_ = [torch.ones((1, 3, 5, 5) )]
lowerCAmelCase_ = [[1764, 2646]]
lowerCAmelCase_ = [[683, 1024]]
lowerCAmelCase_ = processor.post_process_masks(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowerCAmelCase_ = processor.post_process_masks(
_UpperCamelCase , torch.tensor(_UpperCamelCase ) , torch.tensor(_UpperCamelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
lowerCAmelCase_ = [np.ones((1, 3, 5, 5) )]
lowerCAmelCase_ = processor.post_process_masks(_UpperCamelCase , np.array(_UpperCamelCase ) , np.array(_UpperCamelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowerCAmelCase_ = [[1, 0], [0, 1]]
with self.assertRaises(_UpperCamelCase ):
lowerCAmelCase_ = processor.post_process_masks(_UpperCamelCase , np.array(_UpperCamelCase ) , np.array(_UpperCamelCase ) )
@require_vision
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = tempfile.mkdtemp()
lowerCAmelCase_ = SamImageProcessor()
lowerCAmelCase_ = SamProcessor(_UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self , **_lowerCamelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase ).image_processor
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase_ = [Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0 )
lowerCAmelCase_ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCamelCase )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = SamProcessor(image_processor=_UpperCamelCase )
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = image_processor(_UpperCamelCase , return_tensors='''np''' )
lowerCAmelCase_ = processor(images=_UpperCamelCase , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = SamProcessor(image_processor=_UpperCamelCase )
lowerCAmelCase_ = [tf.ones((1, 3, 5, 5) )]
lowerCAmelCase_ = [[1764, 2646]]
lowerCAmelCase_ = [[683, 1024]]
lowerCAmelCase_ = processor.post_process_masks(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowerCAmelCase_ = processor.post_process_masks(
_UpperCamelCase , tf.convert_to_tensor(_UpperCamelCase ) , tf.convert_to_tensor(_UpperCamelCase ) , return_tensors='''tf''' , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
lowerCAmelCase_ = [np.ones((1, 3, 5, 5) )]
lowerCAmelCase_ = processor.post_process_masks(
_UpperCamelCase , np.array(_UpperCamelCase ) , np.array(_UpperCamelCase ) , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowerCAmelCase_ = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
lowerCAmelCase_ = processor.post_process_masks(
_UpperCamelCase , np.array(_UpperCamelCase ) , np.array(_UpperCamelCase ) , return_tensors='''tf''' )
@require_vision
@require_torchvision
class __UpperCAmelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = tempfile.mkdtemp()
lowerCAmelCase_ = SamImageProcessor()
lowerCAmelCase_ = SamProcessor(_UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self , **_lowerCamelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase ).image_processor
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase_ = [Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = SamProcessor(image_processor=_UpperCamelCase )
lowerCAmelCase_ = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
lowerCAmelCase_ = [tf.convert_to_tensor(_UpperCamelCase )]
lowerCAmelCase_ = [torch.tensor(_UpperCamelCase )]
lowerCAmelCase_ = [[1764, 2646]]
lowerCAmelCase_ = [[683, 1024]]
lowerCAmelCase_ = processor.post_process_masks(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , return_tensors='''tf''' )
lowerCAmelCase_ = processor.post_process_masks(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , return_tensors='''pt''' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = SamProcessor(image_processor=_UpperCamelCase )
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = image_processor(_UpperCamelCase , return_tensors='''pt''' )["""pixel_values"""].numpy()
lowerCAmelCase_ = processor(images=_UpperCamelCase , return_tensors='''pt''' )["""pixel_values"""].numpy()
lowerCAmelCase_ = image_processor(_UpperCamelCase , return_tensors='''tf''' )["""pixel_values"""].numpy()
lowerCAmelCase_ = processor(images=_UpperCamelCase , return_tensors='''tf''' )["""pixel_values"""].numpy()
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase ) )
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase ) )
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase ) )
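# A minimal sketch (not the library implementation) of what the
# post_process_masks calls above are expected to do: upscale the low-resolution
# masks to the padded model input size, crop away the padding using the
# reshaped input size, then resize to the original image size. The helper name
# and the 1024 model input size are assumptions inferred from the shapes
# asserted above.
import torch
import torch.nn.functional as F

def post_process_masks_sketch(masks, original_size, reshaped_input_size, image_size=1024):
    # masks: (batch, num_masks, h, w) low-resolution mask logits
    masks = F.interpolate(masks, (image_size, image_size), mode="bilinear", align_corners=False)
    masks = masks[..., : reshaped_input_size[0], : reshaped_input_size[1]]  # drop padding
    return F.interpolate(masks, tuple(original_size), mode="bilinear", align_corners=False)

out = post_process_masks_sketch(torch.ones(1, 3, 5, 5), (1764, 2646), (683, 1024))
assert out.shape == (1, 3, 1764, 2646)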
| 274 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __UpperCamelCase ( lowercase__ ):
lowercase : "DiagonalGaussianDistribution"
class __UpperCamelCase ( lowercase__ , lowercase__ ):
lowercase : Tuple = True
@register_to_config
def __init__( self :List[str] ,_UpperCamelCase :int = 3 ,_UpperCamelCase :int = 3 ,_UpperCamelCase :Tuple[str] = ("DownEncoderBlock2D",) ,_UpperCamelCase :Tuple[str] = ("UpDecoderBlock2D",) ,_UpperCamelCase :Tuple[int] = (6_4,) ,_UpperCamelCase :int = 1 ,_UpperCamelCase :str = "silu" ,_UpperCamelCase :int = 4 ,_UpperCamelCase :int = 3_2 ,_UpperCamelCase :int = 3_2 ,_UpperCamelCase :float = 0.1_82_15 ,):
super().__init__()
# pass init params to Encoder
snake_case_ : Union[str, Any] = Encoder(
in_channels=_UpperCamelCase ,out_channels=_UpperCamelCase ,down_block_types=_UpperCamelCase ,block_out_channels=_UpperCamelCase ,layers_per_block=_UpperCamelCase ,act_fn=_UpperCamelCase ,norm_num_groups=_UpperCamelCase ,double_z=_UpperCamelCase ,)
# pass init params to Decoder
snake_case_ : Optional[Any] = Decoder(
in_channels=_UpperCamelCase ,out_channels=_UpperCamelCase ,up_block_types=_UpperCamelCase ,block_out_channels=_UpperCamelCase ,layers_per_block=_UpperCamelCase ,norm_num_groups=_UpperCamelCase ,act_fn=_UpperCamelCase ,)
snake_case_ : int = nn.Convad(2 * latent_channels ,2 * latent_channels ,1 )
snake_case_ : Union[str, Any] = nn.Convad(_UpperCamelCase ,_UpperCamelCase ,1 )
snake_case_ : Optional[Any] = False
snake_case_ : Union[str, Any] = False
# only relevant if vae tiling is enabled
snake_case_ : Optional[Any] = self.config.sample_size
snake_case_ : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size ,(list, tuple) )
else self.config.sample_size
)
snake_case_ : Tuple = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
snake_case_ : str = 0.25
def a__ ( self :str ,_UpperCamelCase :Dict ,_UpperCamelCase :Union[str, Any]=False ):
if isinstance(_UpperCamelCase ,(Encoder, Decoder) ):
snake_case_ : Any = value
def a__ ( self :int ,_UpperCamelCase :bool = True ):
snake_case_ : Optional[Any] = use_tiling
def a__ ( self :Optional[int] ):
self.enable_tiling(_UpperCamelCase )
def a__ ( self :Union[str, Any] ):
snake_case_ : List[Any] = True
def a__ ( self :Dict ):
snake_case_ : int = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self :List[Any] ):
snake_case_ : Optional[int] = {}
def fn_recursive_add_processors(_UpperCamelCase :str ,_UpperCamelCase :torch.nn.Module ,_UpperCamelCase :Dict[str, AttentionProcessor] ):
if hasattr(_UpperCamelCase ,"""set_processor""" ):
snake_case_ : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' ,_UpperCamelCase ,_UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
return processors
def a__ ( self :Any ,_UpperCamelCase :Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
snake_case_ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_UpperCamelCase ,_UpperCamelCase ) and len(_UpperCamelCase ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_UpperCamelCase :str ,_UpperCamelCase :torch.nn.Module ,_UpperCamelCase :str ):
if hasattr(_UpperCamelCase ,"""set_processor""" ):
if not isinstance(_UpperCamelCase ,_UpperCamelCase ):
module.set_processor(_UpperCamelCase )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' ,_UpperCamelCase ,_UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
def a__ ( self :Tuple ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def a__ ( self :Union[str, Any] ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :bool = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCamelCase ,return_dict=_UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
snake_case_ : Any = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )]
snake_case_ : List[Any] = torch.cat(_UpperCamelCase )
else:
snake_case_ : str = self.encoder(_UpperCamelCase )
snake_case_ : Optional[Any] = self.quant_conv(_UpperCamelCase )
snake_case_ : int = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def a__ ( self :str ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :bool = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCamelCase ,return_dict=_UpperCamelCase )
snake_case_ : List[str] = self.post_quant_conv(_UpperCamelCase )
snake_case_ : Dict = self.decoder(_UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
@apply_forward_hook
def a__ ( self :Dict ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :bool = True ):
if self.use_slicing and z.shape[0] > 1:
snake_case_ : Tuple = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )]
snake_case_ : List[str] = torch.cat(_UpperCamelCase )
else:
snake_case_ : List[str] = self._decode(_UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCamelCase )
def a__ ( self :Any ,_UpperCamelCase :str ,_UpperCamelCase :List[str] ,_UpperCamelCase :List[Any] ):
snake_case_ : Union[str, Any] = min(a.shape[2] ,b.shape[2] ,_UpperCamelCase )
for y in range(_UpperCamelCase ):
snake_case_ : int = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def a__ ( self :Tuple ,_UpperCamelCase :Dict ,_UpperCamelCase :int ,_UpperCamelCase :Tuple ):
snake_case_ : Optional[int] = min(a.shape[3] ,b.shape[3] ,_UpperCamelCase )
for x in range(_UpperCamelCase ):
snake_case_ : str = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def a__ ( self :Tuple ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :bool = True ):
snake_case_ : Tuple = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
snake_case_ : Optional[int] = int(self.tile_latent_min_size * self.tile_overlap_factor )
snake_case_ : Optional[Any] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
snake_case_ : Optional[int] = []
for i in range(0 ,x.shape[2] ,_UpperCamelCase ):
snake_case_ : Optional[Any] = []
for j in range(0 ,x.shape[3] ,_UpperCamelCase ):
snake_case_ : List[str] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
snake_case_ : int = self.encoder(_UpperCamelCase )
snake_case_ : List[str] = self.quant_conv(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
snake_case_ : str = []
for i, row in enumerate(_UpperCamelCase ):
snake_case_ : str = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
snake_case_ : List[Any] = self.blend_v(rows[i - 1][j] ,_UpperCamelCase ,_UpperCamelCase )
if j > 0:
snake_case_ : Any = self.blend_h(row[j - 1] ,_UpperCamelCase ,_UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase ,dim=3 ) )
snake_case_ : str = torch.cat(_UpperCamelCase ,dim=2 )
snake_case_ : Union[str, Any] = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def a__ ( self :List[Any] ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :bool = True ):
snake_case_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
snake_case_ : Optional[Any] = int(self.tile_sample_min_size * self.tile_overlap_factor )
snake_case_ : int = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
snake_case_ : Tuple = []
for i in range(0 ,z.shape[2] ,_UpperCamelCase ):
snake_case_ : Dict = []
for j in range(0 ,z.shape[3] ,_UpperCamelCase ):
snake_case_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
snake_case_ : List[str] = self.post_quant_conv(_UpperCamelCase )
snake_case_ : int = self.decoder(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
snake_case_ : Tuple = []
for i, row in enumerate(_UpperCamelCase ):
snake_case_ : int = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
snake_case_ : Dict = self.blend_v(rows[i - 1][j] ,_UpperCamelCase ,_UpperCamelCase )
if j > 0:
snake_case_ : int = self.blend_h(row[j - 1] ,_UpperCamelCase ,_UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase ,dim=3 ) )
snake_case_ : Optional[int] = torch.cat(_UpperCamelCase ,dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
def a__ ( self :Any ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = True ,_UpperCamelCase :Optional[torch.Generator] = None ,):
snake_case_ : Union[str, Any] = sample
snake_case_ : Optional[int] = self.encode(_UpperCamelCase ).latent_dist
if sample_posterior:
snake_case_ : List[Any] = posterior.sample(generator=_UpperCamelCase )
else:
snake_case_ : Tuple = posterior.mode()
snake_case_ : Dict = self.decode(_UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase ) | 334 | 0 |
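# The tiled encode/decode paths above stitch neighbouring tiles with a linear
# cross-fade over the overlap region (the blend_v / blend_h helpers). A
# self-contained numpy sketch of the horizontal blend, assuming (B, C, H, W)
# arrays; the weight ramps linearly from the left tile to the right one:
import numpy as np

def blend_h_sketch(a, b, blend_extent):
    blend_extent = min(a.shape[3], b.shape[3], blend_extent)
    for x in range(blend_extent):
        b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
    return b

left = np.zeros((1, 1, 4, 8))
right = np.ones((1, 1, 4, 8))
blended = blend_h_sketch(left, right, blend_extent=4)
# the first four columns now ramp 0.0, 0.25, 0.5, 0.75 before reaching 1.0
assert np.allclose(blended[0, 0, 0, :4], [0.0, 0.25, 0.5, 0.75])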
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __lowerCamelCase ( SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE="shi-labs/oneformer_demo" ) -> List[Any]:
"""simple docstring"""
with open(hf_hub_download(SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE,repo_type='dataset' ),'r' ) as f:
_UpperCAmelCase = json.load(SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {}
_UpperCAmelCase = []
_UpperCAmelCase = []
for key, info in class_info.items():
_UpperCAmelCase = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = thing_ids
_UpperCAmelCase = class_names
return metadata
class lowerCAmelCase ( unittest.TestCase ):
def __init__( self , a__ , a__=7 , a__=3 , a__=30 , a__=4_00 , a__=None , a__=True , a__=True , a__=[0.5, 0.5, 0.5] , a__=[0.5, 0.5, 0.5] , a__=10 , a__=False , a__=2_55 , a__="shi-labs/oneformer_demo" , a__="ade20k_panoptic.json" , a__=10 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
_UpperCAmelCase = class_info_file
_UpperCAmelCase = prepare_metadata(a__ , a__ )
_UpperCAmelCase = num_text
_UpperCAmelCase = repo_path
# for the post_process_functions
_UpperCAmelCase = 2
_UpperCAmelCase = 10
_UpperCAmelCase = 10
_UpperCAmelCase = 3
_UpperCAmelCase = 4
_UpperCAmelCase = num_labels
_UpperCAmelCase = do_reduce_labels
_UpperCAmelCase = ignore_index
def __A ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def __A ( self , a__ , a__=False ):
if not batched:
_UpperCAmelCase = image_inputs[0]
if isinstance(a__ , Image.Image ):
_UpperCAmelCase , _UpperCAmelCase = image.size
else:
_UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
_UpperCAmelCase = int(self.size['shortest_edge'] * h / w )
_UpperCAmelCase = self.size['shortest_edge']
elif w > h:
_UpperCAmelCase = self.size['shortest_edge']
_UpperCAmelCase = int(self.size['shortest_edge'] * w / h )
else:
_UpperCAmelCase = self.size['shortest_edge']
_UpperCAmelCase = self.size['shortest_edge']
else:
_UpperCAmelCase = []
for image in image_inputs:
_UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCAmelCase = max(a__ , key=lambda a__ : item[0] )[0]
_UpperCAmelCase = max(a__ , key=lambda a__ : item[1] )[1]
return expected_height, expected_width
def __A ( self ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCAmelCase ( snake_case , unittest.TestCase ):
lowerCAmelCase__ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
lowerCAmelCase__ = image_processing_class
def __A ( self ):
_UpperCAmelCase = OneFormerImageProcessorTester(self )
@property
def __A ( self ):
return self.image_processing_tester.prepare_image_processor_dict()
def __A ( self ):
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , 'image_mean' ) )
self.assertTrue(hasattr(a__ , 'image_std' ) )
self.assertTrue(hasattr(a__ , 'do_normalize' ) )
self.assertTrue(hasattr(a__ , 'do_resize' ) )
self.assertTrue(hasattr(a__ , 'size' ) )
self.assertTrue(hasattr(a__ , 'ignore_index' ) )
self.assertTrue(hasattr(a__ , 'class_info_file' ) )
self.assertTrue(hasattr(a__ , 'num_text' ) )
self.assertTrue(hasattr(a__ , 'repo_path' ) )
self.assertTrue(hasattr(a__ , 'metadata' ) )
self.assertTrue(hasattr(a__ , 'do_reduce_labels' ) )
def __A ( self ):
pass
def __A ( self ):
# Initialize image_processor
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(a__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(a__ , batched=a__ )
_UpperCAmelCase = image_processor(
a__ , ['semantic'] * len(a__ ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self ):
# Initialize image_processor
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(a__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(a__ , batched=a__ )
_UpperCAmelCase = image_processor(
a__ , ['semantic'] * len(a__ ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self ):
# Initialize image_processor
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(a__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(a__ , batched=a__ )
_UpperCAmelCase = image_processor(
a__ , ['semantic'] * len(a__ ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self , a__=False , a__=False , a__="np" ):
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_UpperCAmelCase = self.image_processing_tester.num_labels
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=a__ )
if with_segmentation_maps:
_UpperCAmelCase = num_labels
if is_instance_map:
_UpperCAmelCase = list(range(a__ ) ) * 2
_UpperCAmelCase = dict(enumerate(a__ ) )
_UpperCAmelCase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_UpperCAmelCase = [Image.fromarray(a__ ) for annotation in annotations]
_UpperCAmelCase = image_processor(
a__ , ['semantic'] * len(a__ ) , a__ , return_tensors='pt' , instance_id_to_semantic_id=a__ , pad_and_return_pixel_mask=a__ , )
return inputs
def __A ( self ):
pass
def __A ( self ):
def common(a__=False , a__=None ):
_UpperCAmelCase = self.comm_get_image_processor_inputs(
with_segmentation_maps=a__ , is_instance_map=a__ , segmentation_type=a__ )
_UpperCAmelCase = inputs['mask_labels']
_UpperCAmelCase = inputs['class_labels']
_UpperCAmelCase = inputs['pixel_values']
_UpperCAmelCase = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(a__ , a__ , a__ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(a__ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=a__ )
common(is_instance_map=a__ , segmentation_type='pil' )
common(is_instance_map=a__ , segmentation_type='pil' )
def __A ( self ):
_UpperCAmelCase = np.zeros((20, 50) )
_UpperCAmelCase = 1
_UpperCAmelCase = 1
_UpperCAmelCase = 1
_UpperCAmelCase = binary_mask_to_rle(a__ )
self.assertEqual(len(a__ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def __A ( self ):
_UpperCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_UpperCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
        _UpperCAmelCase = image_processor.post_process_semantic_segmentation(a__ )
self.assertEqual(len(a__ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_UpperCAmelCase = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _UpperCAmelCase = image_processor.post_process_semantic_segmentation(a__ , target_sizes=a__ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def __A ( self ):
_UpperCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_UpperCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCAmelCase = image_processor.post_process_instance_segmentation(a__ , threshold=0 )
self.assertTrue(len(a__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , a__ )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def __A ( self ):
_UpperCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_UpperCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCAmelCase = image_processor.post_process_panoptic_segmentation(a__ , threshold=0 )
self.assertTrue(len(a__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , a__ )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
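# The binary_mask_to_rle test above expects run-length encoding over the
# row-major flattened mask as (1-indexed start, length) pairs. A minimal
# sketch consistent with those assertions (not necessarily the transformers
# implementation):
import numpy as np

def binary_mask_to_rle_sketch(mask):
    pixels = np.concatenate([[0], mask.flatten(), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1  # 1-indexed run boundaries
    runs[1::2] -= runs[::2]                            # turn end positions into lengths
    return list(runs)

mask = np.zeros((20, 50))
mask[0, 20:] = 1  # flat pixels 20..49
mask[1, :15] = 1  # flat pixels 50..64, contiguous with the run above
assert binary_mask_to_rle_sketch(mask) == [21, 45]  # start 21, length 45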
| 494 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __lowerCamelCase ( SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_UpperCAmelCase = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = emb.weight.shape
_UpperCAmelCase = nn.Linear(SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE,bias=SCREAMING_SNAKE_CASE )
_UpperCAmelCase = emb.weight.data
return lin_layer
def __lowerCamelCase ( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
_UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE,map_location='cpu' )
_UpperCAmelCase = mam_aaa['args'] or mam_aaa['cfg']['model']
_UpperCAmelCase = mam_aaa['model']
remove_ignore_keys_(SCREAMING_SNAKE_CASE )
_UpperCAmelCase = state_dict['encoder.embed_tokens.weight'].shape[0]
_UpperCAmelCase = MaMaaaConfig(
vocab_size=SCREAMING_SNAKE_CASE,max_position_embeddings=1024,encoder_layers=args.encoder_layers,decoder_layers=args.decoder_layers,encoder_attention_heads=args.encoder_attention_heads,decoder_attention_heads=args.decoder_attention_heads,encoder_ffn_dim=args.encoder_ffn_embed_dim,decoder_ffn_dim=args.decoder_ffn_embed_dim,d_model=args.encoder_embed_dim,encoder_layerdrop=args.encoder_layerdrop,decoder_layerdrop=args.decoder_layerdrop,dropout=args.dropout,attention_dropout=args.attention_dropout,activation_dropout=args.activation_dropout,activation_function='relu',)
_UpperCAmelCase = state_dict['decoder.embed_tokens.weight']
_UpperCAmelCase = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE )
model.model.load_state_dict(SCREAMING_SNAKE_CASE,strict=SCREAMING_SNAKE_CASE )
_UpperCAmelCase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCAmelCase_ = parser.parse_args()
    lowerCAmelCase_ = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
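# make_linear_from_emb above is standard weight tying: the output projection is
# a bias-free linear layer that reuses the shared embedding matrix. A
# standalone sketch of the same idea:
import torch
from torch import nn

def make_linear_from_emb_sketch(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data  # share the underlying storage
    return lin_layer

emb = nn.Embedding(10, 4)
head = make_linear_from_emb_sketch(emb)
assert torch.equal(head.weight, emb.weight)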
| 494 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
snake_case = None
snake_case = logging.get_logger(__name__)
snake_case = """▁"""
snake_case = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
snake_case = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
snake_case = {
"""google/pegasus-xsum""": 5_1_2,
}
class A_ ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Dict = PegasusTokenizer
SCREAMING_SNAKE_CASE_ : str = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple ,__A : Optional[Any]=None ,__A : List[str]=None ,__A : Dict="<pad>" ,__A : int="</s>" ,__A : List[str]="<unk>" ,__A : Tuple="<mask_2>" ,__A : Any="<mask_1>" ,__A : Union[str, Any]=None ,__A : Optional[int]=103 ,**__A : List[str] ,) -> int:
_lowercase = offset
if additional_special_tokens is not None:
if not isinstance(A_ ,A_ ):
raise TypeError(
F"""additional_special_tokens should be of type {type(A_ )}, but is"""
F""" {type(A_ )}""" )
_lowercase = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(A_ ) ,self.offset - 1 )
]
if len(set(A_ ) ) != len(A_ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
_lowercase = additional_special_tokens_extended
else:
_lowercase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 ,self.offset )]
super().__init__(
A_ ,tokenizer_file=A_ ,pad_token=A_ ,eos_token=A_ ,unk_token=A_ ,mask_token=A_ ,mask_token_sent=A_ ,offset=A_ ,additional_special_tokens=A_ ,**A_ ,)
_lowercase = vocab_file
_lowercase = False if not self.vocab_file else True
def __UpperCAmelCase ( self : Optional[Any] ,__A : List[str] ) -> Any:
_lowercase = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
F""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def __UpperCAmelCase ( self : List[Any] ,__A : List ,__A : Optional[List] = None ,__A : bool = False ) -> List[Any]:
if already_has_special_tokens:
return self._special_token_mask(A_ )
elif token_ids_a is None:
return self._special_token_mask(A_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __UpperCAmelCase ( self : int ,__A : Dict ,__A : List[Any]=None ) -> List[Any]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : List[Any] ,__A : str ,__A : Optional[str] = None ) -> List[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(A_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowercase = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file ,A_ )
return (out_vocab_file,) | 67 |
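# The tokenizer above reserves `offset` ids (103 by default) at the front of
# the vocabulary for special tokens plus <unk_2> .. <unk_102> fillers. A small
# sketch of how that filler list is built when no extra tokens are passed in,
# mirroring the else-branch of __init__ above:
def build_additional_special_tokens(mask_token_sent="<mask_1>", offset=103):
    tokens = [mask_token_sent] if mask_token_sent is not None else []
    tokens += [f"<unk_{i}>" for i in range(2, offset)]  # <unk_2> ... <unk_102>
    return tokens

tokens = build_additional_special_tokens()
assert len(tokens) == 102 and tokens[0] == "<mask_1>" and tokens[-1] == "<unk_102>"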
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__snake_case = get_tests_dir('''fixtures''')
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = mock.Mock()
__UpperCamelCase = 500
__UpperCamelCase = {}
__UpperCamelCase = HTTPError
__UpperCamelCase = {}
# Download this model to make sure it's in the cache.
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request',return_value=A_ ) as mock_head:
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class __lowerCamelCase (unittest.TestCase ):
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
__UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
try:
delete_repo(token=cls._token,repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-feature-extractor',use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='test-feature-extractor',push_to_hub=A_,use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('valid_org/test-feature-extractor',use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='valid_org/test-feature-extractor-org',push_to_hub=A_,use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: int ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-dynamic-feature-extractor',use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map,{'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'},)
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''',trust_remote_code=A_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__,'CustomFeatureExtractor' )
| 1 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE( snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Any ) ->List[Any]:
'''simple docstring'''
_lowercase : Union[str, Any] = WavaVecaForSequenceClassification.from_pretrained(snake_case_ , config=snake_case_ )
_lowercase : Optional[Any] = downstream_dict['''projector.weight''']
_lowercase : Any = downstream_dict['''projector.bias''']
_lowercase : str = downstream_dict['''model.post_net.linear.weight''']
_lowercase : Tuple = downstream_dict['''model.post_net.linear.bias''']
return model
def _SCREAMING_SNAKE_CASE( snake_case_ : int , snake_case_ : str , snake_case_ : Dict ) ->int:
'''simple docstring'''
_lowercase : List[str] = WavaVecaForAudioFrameClassification.from_pretrained(snake_case_ , config=snake_case_ )
_lowercase : Optional[int] = downstream_dict['''model.linear.weight''']
_lowercase : Optional[Any] = downstream_dict['''model.linear.bias''']
return model
def _SCREAMING_SNAKE_CASE( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : str ) ->Dict:
'''simple docstring'''
_lowercase : Any = WavaVecaForXVector.from_pretrained(snake_case_ , config=snake_case_ )
_lowercase : Union[str, Any] = downstream_dict['''connector.weight''']
_lowercase : Union[str, Any] = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_lowercase : List[str] = downstream_dict[
F"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
_lowercase : Dict = downstream_dict[F"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
_lowercase : Union[str, Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
_lowercase : Tuple = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
_lowercase : List[Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
_lowercase : Union[str, Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
_lowercase : Optional[Any] = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def _SCREAMING_SNAKE_CASE( snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : Optional[int] ) ->Dict:
'''simple docstring'''
_lowercase : Tuple = torch.load(snake_case_ , map_location='''cpu''' )
_lowercase : str = checkpoint['''Downstream''']
_lowercase : Any = WavaVecaConfig.from_pretrained(snake_case_ )
_lowercase : Any = WavaVecaFeatureExtractor.from_pretrained(
snake_case_ , return_attention_mask=snake_case_ , do_normalize=snake_case_ )
_lowercase : List[Any] = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
_lowercase : Union[str, Any] = convert_classification(snake_case_ , snake_case_ , snake_case_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
_lowercase : List[str] = convert_diarization(snake_case_ , snake_case_ , snake_case_ )
elif arch.endswith('''ForXVector''' ):
_lowercase : int = convert_xvector(snake_case_ , snake_case_ , snake_case_ )
else:
raise NotImplementedError(F"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
_lowercase : str = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(snake_case_ )
hf_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
lowerCamelCase__ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
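# The converter above dispatches on the suffix of the config's architecture
# name. A compact sketch of that dispatch pattern (the handler names match the
# converter functions defined above):
def pick_converter(arch: str) -> str:
    handlers = {
        "ForSequenceClassification": "convert_classification",
        "ForAudioFrameClassification": "convert_diarization",
        "ForXVector": "convert_xvector",
    }
    for suffix, handler in handlers.items():
        if arch.endswith(suffix):
            return handler
    raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

assert pick_converter("Wav2Vec2ForXVector") == "convert_xvector"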
| 411 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
lowerCamelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _SCREAMING_SNAKE_CASE( snake_case_ : str ) ->Optional[Any]:
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_lowercase : Optional[int] = model_type_to_module_name(snake_case_ )
_lowercase : Optional[Any] = importlib.import_module(F".{module_name}" , '''transformers.models''' )
try:
return getattr(snake_case_ , snake_case_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(snake_case_ , '''__name__''' , snake_case_ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_lowercase : int = importlib.import_module('''transformers''' )
if hasattr(snake_case_ , snake_case_ ):
return getattr(snake_case_ , snake_case_ )
return None
def _SCREAMING_SNAKE_CASE( snake_case_ : Union[str, os.PathLike] , snake_case_ : Optional[Union[str, os.PathLike]] = None , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : Optional[Dict[str, str]] = None , snake_case_ : Optional[Union[bool, str]] = None , snake_case_ : Optional[str] = None , snake_case_ : bool = False , **snake_case_ : int , ) ->Union[str, Any]:
'''simple docstring'''
_lowercase : Dict = get_file_from_repo(
snake_case_ , snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , resume_download=snake_case_ , proxies=snake_case_ , use_auth_token=snake_case_ , revision=snake_case_ , local_files_only=snake_case_ , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(snake_case_ , encoding='''utf-8''' ) as reader:
return json.load(snake_case_ )
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : int ) -> Tuple:
'''simple docstring'''
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(UpperCamelCase_ )
def __lowercase ( cls : str , UpperCamelCase_ : Dict , **UpperCamelCase_ : Any ) -> Tuple:
'''simple docstring'''
_lowercase : int = kwargs.pop('''config''' , UpperCamelCase_ )
_lowercase : Union[str, Any] = kwargs.pop('''trust_remote_code''' , UpperCamelCase_ )
_lowercase : str = True
_lowercase , _lowercase : int = ImageProcessingMixin.get_image_processor_dict(UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Any = config_dict.get('''image_processor_type''' , UpperCamelCase_ )
_lowercase : List[str] = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
_lowercase : List[str] = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_lowercase : str = config_dict.pop('''feature_extractor_type''' , UpperCamelCase_ )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
_lowercase : Any = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
_lowercase : List[str] = config_dict['''auto_map''']['''AutoFeatureExtractor''']
_lowercase : List[str] = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : Tuple = AutoConfig.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
            # It could be in `config.image_processor_type`
_lowercase : Optional[int] = getattr(UpperCamelCase_ , '''image_processor_type''' , UpperCamelCase_ )
if hasattr(UpperCamelCase_ , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
_lowercase : List[Any] = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
_lowercase : int = image_processor_class_from_name(UpperCamelCase_ )
_lowercase : str = image_processor_auto_map is not None
_lowercase : List[str] = image_processor_class is not None or type(UpperCamelCase_ ) in IMAGE_PROCESSOR_MAPPING
_lowercase : Tuple = resolve_trust_remote_code(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if has_remote_code and trust_remote_code:
_lowercase : Dict = get_class_from_dynamic_module(
UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : List[str] = kwargs.pop('''code_revision''' , UpperCamelCase_ )
if os.path.isdir(UpperCamelCase_ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
elif image_processor_class is not None:
return image_processor_class.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(UpperCamelCase_ ) in IMAGE_PROCESSOR_MAPPING:
_lowercase : List[str] = IMAGE_PROCESSOR_MAPPING[type(UpperCamelCase_ )]
return image_processor_class.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
raise ValueError(
F"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
F"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}" )
@staticmethod
def __lowercase ( UpperCamelCase_ : Dict , UpperCamelCase_ : Dict ) -> Optional[int]:
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(UpperCamelCase_ , UpperCamelCase_ )
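# AutoImageProcessor is only used through its classmethods; instantiating it
# directly raises, as coded in __init__ above. A typical call (the checkpoint
# name is illustrative) resolves through the mapping at the top of this file,
# e.g. a resnet config resolves to ConvNextImageProcessor:
from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")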
| 411 | 1 |
'''simple docstring'''
a : Union[str, Any] = frozenset(
[
'''prompt''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
a : Tuple = frozenset(['''prompt''', '''negative_prompt'''])
a : List[str] = frozenset([])
a : Optional[Any] = frozenset(['''image'''])
a : List[str] = frozenset(
[
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
a : Dict = frozenset(['''image'''])
a : int = frozenset(
[
'''prompt''',
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
a : Optional[int] = frozenset(['''prompt''', '''image''', '''negative_prompt'''])
a : int = frozenset(
[
# Text guided image variation with an image mask
'''prompt''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
a : Union[str, Any] = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt'''])
a : Optional[int] = frozenset(
[
# image variation with an image mask
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
a : List[str] = frozenset(['''image''', '''mask_image'''])
a : List[Any] = frozenset(
[
'''example_image''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
a : Optional[Any] = frozenset(['''example_image''', '''image''', '''mask_image'''])
a : Union[str, Any] = frozenset(['''class_labels'''])
a : List[Any] = frozenset(['''class_labels'''])
a : str = frozenset(['''batch_size'''])
a : Union[str, Any] = frozenset([])
a : Any = frozenset(['''batch_size'''])
a : Union[str, Any] = frozenset([])
a : str = frozenset(
[
'''prompt''',
'''audio_length_in_s''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
a : Tuple = frozenset(['''prompt''', '''negative_prompt'''])
a : List[str] = frozenset(['''input_tokens'''])
a : List[str] = frozenset(['''input_tokens'''])
| 69 |
import os
def __A ( ) -> Dict:
with open(os.path.dirname(__lowerCamelCase ) + """/p022_names.txt""" ) as file:
a = str(file.readlines()[0] )
a = names.replace("""\"""" , """""" ).split(""",""" )
names.sort()
a = 0
a = 0
for i, name in enumerate(__lowerCamelCase ):
for letter in name:
name_score += ord(__lowerCamelCase ) - 64
total_score += (i + 1) * name_score
a = 0
return total_score
if __name__ == "__main__":
print(solution())
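# A worked check of the scoring rule above: COLIN has alphabetical value
# 3 + 15 + 12 + 9 + 14 = 53, so at position 938 in the sorted list it
# contributes 938 * 53 = 49714 to the total.
assert sum(ord(letter) - 64 for letter in "COLIN") == 53
assert 938 * 53 == 49714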
| 468 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        # also check the forward pass without labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as SwiftFormer does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 101 |
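# A self-contained sketch (added for illustration, not part of the original test
# file) of the shape arithmetic that test_hidden_states_output verifies: the patch
# stem downsamples the 224x224 input by 4, and the spatial resolution halves once
# more after every two hidden states while the channels follow embed_dims.
image_size = 224
embed_dims = [48, 56, 112, 220]
for i in range(8):
    side = (image_size // 4) // 2 ** (i // 2)
    print(f"hidden_states[{i}]: channels={embed_dims[i // 2]}, spatial={side}x{side}")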
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
A = logging.get_logger(__name__)
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , **_UpperCAmelCase ):
requires_backends(self , ['''bs4'''] )
super().__init__(**_UpperCAmelCase )
    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        # Check that the input has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
| 101 | 1 |
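# A short usage sketch (added for illustration, not part of the original module;
# it assumes `bs4` is installed and `MarkupLMFeatureExtractor` is imported from
# transformers, since this file itself uses package-relative imports):
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#     encoding["nodes"]   # [['Hello world']]
#     encoding["xpaths"]  # [['/html/body/p']]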