| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
"""Find the minimum number of perfect squares that sum to a given number."""
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Return the minimum count of perfect squares summing to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    # answers[i] holds the minimum number of squares that sum to i.
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
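A few quick spot checks of the dynamic programme above (the function name is the one restored in this cleanup; the expected counts follow from 25 = 5², 37 = 36 + 1, and 21 = 16 + 4 + 1):

# Spot checks for the DP above; values follow from the decompositions noted.
assert minimum_squares_to_represent_a_number(25) == 1  # 25 = 5**2
assert minimum_squares_to_represent_a_number(37) == 2  # 37 = 36 + 1
assert minimum_squares_to_represent_a_number(21) == 3  # 21 = 16 + 4 + 1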
---
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # The class embeddings are the noise-augmented image embeddings,
            # i.e. the image embeddings concatenated with the noise level embeddings.
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
---
def stooge_sort(arr: list) -> list:
    """Sort a list in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
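A non-interactive sanity check, runnable against the functions above:

# Example run of the stooge sort defined above.
example = [18, 2, 8, 1, 13]
print(stooge_sort(example))  # [1, 2, 8, 13, 18]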
---
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """
    Project Euler 71: return the numerator of the reduced proper fraction
    immediately to the left of numerator/denominator when all fractions with
    denominators up to `limit` are listed in ascending order.
    """
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
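As a worked check: with denominators up to 8, the reduced proper fraction immediately left of 3/7 is 2/5 (the example from the Project Euler 71 statement), so the search returns numerator 2:

# Small-limit check; 2/5 sits immediately left of 3/7 for denominators <= 8.
assert solution(numerator=3, denominator=7, limit=8) == 2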
---
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Construct a MGP-STR character-level tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
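A hypothetical usage sketch of the tokenizer above; the toy vocabulary and the temporary file are illustrative stand-ins, not assets shipped with the library:

import json
import tempfile

# Toy character vocabulary (hypothetical); "[GO]" doubles as the unk token.
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump({"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}, f)

tokenizer = MgpstrTokenizer(vocab_file=f.name)
print(tokenizer.tokenize("abc"))                         # ['a', 'b', 'c']
print(tokenizer.convert_tokens_to_ids(["a", "b", "z"]))  # [2, 3, 0] ("z" falls back to "[GO]")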
---
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
---
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample is retrieved by indexing the list of token_ids and their corresponding lengths.

    Input:
    ------
        params: `NameSpace` parameters
        data: `List[np.array[int]]`
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
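A minimal sketch of feeding the dataset, assuming a stand-in `params` namespace with just the attributes the class reads (the real training script passes a full argparse namespace):

from types import SimpleNamespace

import numpy as np

# Stand-in parameters; attribute names are the ones read by LmSeqsDataset.
params = SimpleNamespace(
    max_model_input_size=512,
    mlm=False,
    special_tok_ids={"bos_token": 0, "eos_token": 1, "unk_token": 2, "pad_token": 3},
    is_master=True,
)
# Four 13-token sequences framed by bos (0) and eos (1).
data = [np.array([0, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1]) for _ in range(4)]
dataset = LmSeqsDataset(params, data)
tk_t, lg_t = dataset.batch_sequences([dataset[0], dataset[1]])
print(tk_t.shape, lg_t.shape)  # torch.Size([2, 13]) torch.Size([2])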
---
import math
import os
import unittest

from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MegatronBertForCausalLM,
        MegatronBertForMaskedLM,
        MegatronBertForMultipleChoice,
        MegatronBertForNextSentencePrediction,
        MegatronBertForPreTraining,
        MegatronBertForQuestionAnswering,
        MegatronBertForSequenceClassification,
        MegatronBertForTokenClassification,
        MegatronBertModel,
    )


class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
---
"""Convert T5 checkpoint."""

import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
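For reference, the converter can also be driven directly from Python; the three paths below are placeholders for a real TF checkpoint, its config, and an output directory:

# Hypothetical direct invocation of the function defined above.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/t5/model.ckpt",
    config_file="/path/to/t5/config.json",
    pytorch_dump_path="/path/to/t5-pytorch",
)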
---
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ASTForAudioClassification, ASTModel
    from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_torchaudio_available():
    import torchaudio

    from transformers import ASTFeatureExtractor


class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
---
"""
Main driver for the selection menu, based on https://github.com/bchao1/bullet
"""
import builtins
import sys

from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP


in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the selected index"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
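An interactive sketch (it needs a real terminal; `run` is the entry-point name restored above):

# Present two choices and block until the user selects one.
menu = BulletMenu("Which framework do you want to use?", ["pytorch", "tensorflow"])
choice = menu.run(default_choice=0)
print(f"Selected index: {choice}")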
---
"""FNet model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
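Instantiating the configuration with its defaults yields an fnet-base-style architecture:

# Default configuration mirrors google/fnet-base.
config = FNetConfig()
print(config.hidden_size, config.num_hidden_layers)  # 768 12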
---
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling random points in the unit square."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of a function over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Check the estimator against the exact area under y=x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under a quarter circle of radius 2."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
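The estimators can be exercised directly; accuracy improves with the iteration count:

# Rough demonstration runs of the three estimators above.
pi_estimator(100_000)
area_under_line_estimator_check(100_000)
pi_estimator_using_area_under_curve(100_000)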
---
import unittest

from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938,
            5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916,
            508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129,
            1172,
        ]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
---
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Graph:
    """Weighted undirected graph with Boruvka's minimum spanning tree algorithm."""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        # Follow parent pointers until a node is its own component root
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        # Relabel every node with its current component root
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        # Union by size: attach the smaller component to the larger one
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """Placeholder kept for the doctest runner below."""
if __name__ == "__main__":
import doctest
doctest.testmod()
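
# Illustrative usage (an added sketch; the three-node demo graph below is
# hypothetical and not part of the original module):
if __name__ == "__main__":
    demo = Graph(3)
    demo.add_edge(0, 1, 5)
    demo.add_edge(1, 2, 1)
    demo.add_edge(0, 2, 3)
    demo.boruvka()  # selects edges (1, 2) and (0, 2); total MST weight is 4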
| 716
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled, using a bottom-up table."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
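
# Worked check (assuming the classic puzzle where blocks have minimum length 3
# and are separated by at least one empty unit): a seven-unit row can be filled
# in exactly 17 ways, which the table above reproduces:
#     assert solution(7) == 17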
| 624
| 0
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode strings used by the BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    """Byte-pair-encoding tokenizer for LED, derived from the BART/GPT-2 tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
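
# Usage sketch (illustrative; assumes the hub checkpoint named above is reachable):
#     tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#     encoding = tokenizer("Hello world")  # dict with input_ids and attention_mask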
| 136
|
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """Wraps a deepspeed config dict / file / base64 string and answers stage and offload queries."""

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )
        self.config = config
        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage
        self._stage = self.get_value("zero_optimization.stage", -1)
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key
        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload

class DeepSpeedEngineWrapper:
    """Thin wrapper around a deepspeed engine that routes `backward` through it."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        self.engine.backward(loss, **kwargs)
        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped because of gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """Placeholder optimizer used when the optimizer is configured in the deepspeed config file."""

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """Placeholder scheduler used when the scheduler is configured in the deepspeed config file."""

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
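
# Minimal usage sketch (the config dict is hypothetical):
#     ds_config = HfDeepSpeedConfig(
#         {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
#     )
#     ds_config.is_zero3()    # True
#     ds_config.is_offload()  # True: parameters are offloaded to CPU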
| 136
| 1
|
def apply_table(inp, table):
    """Permute the bits of `inp` according to the 1-indexed positions in `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate a bit string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit input in S-box `s`: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One S-DES Feistel round; `p4_table` is taken from module scope when run as a script."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
__snake_case :Tuple =input('Enter 10 bit key: ')
__snake_case :Union[str, Any] =input('Enter 8 bit message: ')
__snake_case :int =[6, 3, 7, 4, 8, 5, 10, 9]
__snake_case :Dict =[3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
__snake_case :Optional[int] =[2, 4, 3, 1]
__snake_case :Tuple =[2, 6, 3, 1, 4, 8, 5, 7]
__snake_case :Dict =[4, 1, 3, 5, 7, 2, 8, 6]
__snake_case :str =[4, 1, 2, 3, 2, 3, 4, 1]
__snake_case :Optional[int] =[[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__snake_case :Optional[Any] =[[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__snake_case :List[Any] =apply_table(key, paa_table)
__snake_case :Tuple =temp[:5]
__snake_case :int =temp[5:]
__snake_case :int =left_shift(left)
__snake_case :List[str] =left_shift(right)
__snake_case :Tuple =apply_table(left + right, pa_table)
__snake_case :List[Any] =left_shift(left)
__snake_case :str =left_shift(right)
__snake_case :Optional[Any] =left_shift(left)
__snake_case :Optional[Any] =left_shift(right)
__snake_case :Any =apply_table(left + right, pa_table)
# encryption
__snake_case :Optional[Any] =apply_table(message, IP)
__snake_case :Optional[int] =function(expansion, sa, sa, keya, temp)
__snake_case :Dict =temp[4:] + temp[:4]
__snake_case :Any =function(expansion, sa, sa, keya, temp)
__snake_case :Optional[int] =apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
__snake_case :str =apply_table(CT, IP)
__snake_case :str =function(expansion, sa, sa, keya, temp)
__snake_case :List[str] =temp[4:] + temp[:4]
__snake_case :Tuple =function(expansion, sa, sa, keya, temp)
__snake_case :Optional[int] =apply_table(temp, IP_inv)
print('Plain text after decypting is:', PT)
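
# Note: encryption and decryption share the same Feistel round `function`; the
# only difference is the subkey order (key1 then key2 to encrypt, key2 then
# key1 to decrypt), which is why the two blocks above mirror each other.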
| 224
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :int =logging.get_logger(__name__)
__snake_case :Any ={
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
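
# Usage sketch (values illustrative):
#     config = SwitchTransformersConfig(num_layers=6, num_experts=4)
#     config.encoder_sparse_step  # 6 // 3 = 2: every second encoder layer is sparse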
| 224
| 1
|
'''simple docstring'''
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
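
# Why `day` starts at 6: 1 Jan 1901 was a Tuesday, so the first Sunday of the
# century falls on 6 Jan 1901; stepping `day` by 7 then always lands on a
# Sunday, and `day == 1` flags a month that begins on one.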
| 125
|
def solution(n: int = 100) -> int:
    """Difference between the sum of the cubes and the sum of the squares of 1..n."""
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
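
# Worked check: the sum of cubes of 1..10 is 55**2 = 3025 and the sum of
# squares is 385, so solution(10) returns 3025 - 385 = 2640:
#     assert solution(10) == 2640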
| 458
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
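
# Note: at import time only `_import_structure` is registered; `_LazyModule`
# loads the torch-backed submodules on first attribute access, which keeps
# `import` of this package cheap when the models are not used.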
| 711
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 10_00, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 10_00, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 208
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
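
# Usage sketch (illustrative; "speech.wav" is a hypothetical local file, and the
# first call downloads the openai/whisper-base checkpoint):
#     tool = SpeechToTextTool()
#     print(tool("speech.wav"))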
| 531
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Build the beta schedule from an alpha_bar function (cosine or exponential)."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
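
# Quick check (illustrative): the schedule yields one beta per timestep, each
# capped at max_beta:
#     betas = betas_for_alpha_bar(1000)
#     assert betas.shape == (1000,) and float(betas.max()) <= 0.999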
class UpperCamelCase( _a , _a ):
snake_case_ : Tuple = [e.name for e in KarrasDiffusionSchedulers]
snake_case_ : Union[str, Any] = 2
@register_to_config
def __init__( self : List[str] , SCREAMING_SNAKE_CASE : int = 1_0_0_0 , SCREAMING_SNAKE_CASE : float = 0.00085 , SCREAMING_SNAKE_CASE : float = 0.012 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[Union[np.ndarray, List[float]]] = None , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : str = "linspace" , SCREAMING_SNAKE_CASE : int = 0 , ) -> Optional[int]:
'''simple docstring'''
if trained_betas is not None:
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.floataa )
elif beta_schedule == "linear":
__snake_case = torch.linspace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__snake_case = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__snake_case = betas_for_alpha_bar(SCREAMING_SNAKE_CASE )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
__snake_case = 1.0 - self.betas
__snake_case = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int=None ) -> Optional[int]:
'''simple docstring'''
if schedule_timesteps is None:
__snake_case = self.timesteps
__snake_case = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__snake_case = 1 if len(SCREAMING_SNAKE_CASE ) > 1 else 0
else:
__snake_case = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE ) else timestep
__snake_case = self._index_counter[timestep_int]
return indices[pos].item()
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> int:
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
'''simple docstring'''
__snake_case = self.index_for_timestep(SCREAMING_SNAKE_CASE )
if self.state_in_first_order:
__snake_case = self.sigmas[step_index]
else:
__snake_case = self.sigmas_interpol[step_index]
__snake_case = sample / ((sigma**2 + 1) ** 0.5)
return sample
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, torch.device] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , ) -> Optional[int]:
'''simple docstring'''
__snake_case = num_inference_steps
__snake_case = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__snake_case = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__snake_case = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__snake_case = (np.arange(0 , SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__snake_case = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__snake_case = (np.arange(SCREAMING_SNAKE_CASE , 0 , -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__snake_case = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__snake_case = torch.from_numpy(np.log(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE )
__snake_case = np.interp(SCREAMING_SNAKE_CASE , np.arange(0 , len(SCREAMING_SNAKE_CASE ) ) , SCREAMING_SNAKE_CASE )
__snake_case = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__snake_case = torch.from_numpy(SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE )
# interpolate sigmas
__snake_case = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__snake_case = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__snake_case = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(SCREAMING_SNAKE_CASE ).startswith("mps" ):
# mps does not support float64
__snake_case = torch.from_numpy(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE , dtype=torch.floataa )
else:
__snake_case = torch.from_numpy(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
# interpolate timesteps
__snake_case = self.sigma_to_t(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE , dtype=timesteps.dtype )
__snake_case = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__snake_case = torch.cat([timesteps[:1], interleaved_timesteps] )
__snake_case = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__snake_case = defaultdict(SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = sigma.log()
# get distribution
__snake_case = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__snake_case = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__snake_case = low_idx + 1
__snake_case = self.log_sigmas[low_idx]
__snake_case = self.log_sigmas[high_idx]
# interpolate sigmas
__snake_case = (low - log_sigma) / (low - high)
__snake_case = w.clamp(0 , 1 )
# transform interpolation to time range
__snake_case = (1 - w) * low_idx + w * high_idx
__snake_case = t.view(sigma.shape )
return t
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> int:
'''simple docstring'''
return self.sample is None
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , SCREAMING_SNAKE_CASE : bool = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
__snake_case = self.index_for_timestep(SCREAMING_SNAKE_CASE )
# advance index counter by 1
__snake_case = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__snake_case = self.sigmas[step_index]
__snake_case = self.sigmas_interpol[step_index + 1]
__snake_case = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__snake_case = self.sigmas[step_index - 1]
__snake_case = self.sigmas_interpol[step_index]
__snake_case = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__snake_case = 0
__snake_case = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__snake_case = sigma_hat if self.state_in_first_order else sigma_interpol
__snake_case = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__snake_case = sigma_hat if self.state_in_first_order else sigma_interpol
__snake_case = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__snake_case = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__snake_case = sigma_interpol - sigma_hat
# store for 2nd order step
__snake_case = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__snake_case = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__snake_case = sigma_next - sigma_hat
__snake_case = self.sample
__snake_case = None
__snake_case = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : torch.FloatTensor , ) -> torch.FloatTensor:
'''simple docstring'''
__snake_case = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE ):
# mps does not support float64
__snake_case = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__snake_case = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__snake_case = self.timesteps.to(original_samples.device )
__snake_case = timesteps.to(original_samples.device )
__snake_case = [self.index_for_timestep(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for t in timesteps]
__snake_case = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__snake_case = sigma.unsqueeze(-1 )
__snake_case = original_samples + noise * sigma
return noisy_samples
def __len__( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.config.num_train_timesteps
| 371
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
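
# Usage sketch: VisualBertConfig extends the usual BERT fields with
# `visual_embedding_dim` (512 by default), e.g.
#     config = VisualBertConfig(visual_embedding_dim=1024)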
| 417
|
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=1_3 , UpperCAmelCase__=7 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=9_9 , UpperCAmelCase__=3_2 , UpperCAmelCase__=5 , UpperCAmelCase__=4 , UpperCAmelCase__=4 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.1 , UpperCAmelCase__=True , UpperCAmelCase__=5_1_2 , UpperCAmelCase__=1_6 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=3 , UpperCAmelCase__=4 , UpperCAmelCase__=None , ) -> int:
_A : Dict = parent
_A : Tuple = batch_size
_A : Optional[Any] = seq_length
_A : Dict = is_training
_A : Tuple = use_input_mask
_A : int = use_token_type_ids
_A : Tuple = use_labels
_A : Optional[int] = vocab_size
_A : List[Any] = hidden_size
_A : List[Any] = num_hidden_layers
_A : Union[str, Any] = num_attention_heads
_A : int = intermediate_multiple_size
_A : Optional[int] = hidden_act
_A : Any = hidden_dropout
_A : Union[str, Any] = attention_dropout
_A : Tuple = weight_tying
_A : Dict = max_position_embeddings
_A : Tuple = type_vocab_size
_A : Optional[int] = type_sequence_label_size
_A : str = initializer_range
_A : str = num_labels
_A : int = num_choices
_A : Optional[int] = scope
def _lowerCamelCase ( self ) -> Dict:
_A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : Any = None
if self.use_input_mask:
_A : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_A : List[Any] = None
if self.use_labels:
_A : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A : int = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCamelCase ( self ) -> Optional[int]:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self ) -> List[Any]:
_A , _A , _A , _A : Optional[Any] = self.prepare_config_and_inputs()
_A : List[Any] = True
return config, input_ids, input_mask, token_labels
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> int:
_A : Dict = GPTNeoXJapaneseModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_A : Tuple = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
_A : Optional[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict:
_A : int = True
_A : Tuple = GPTNeoXJapaneseModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_A : Optional[int] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> str:
_A : int = GPTNeoXJapaneseForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_A : List[Any] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]:
_A : str = True
_A : List[str] = GPTNeoXJapaneseForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
_A : Optional[int] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
_A : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_A : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_A : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_A : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
_A : Dict = torch.cat([input_mask, next_mask] , dim=-1 )
_A : Dict = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
_A : str = output_from_no_past['''hidden_states'''][0]
_A : Optional[int] = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]
# select random slice
_A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_A : int = output_from_no_past[:, -3:, random_slice_idx].detach()
_A : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )
def _lowerCamelCase ( self ) -> Dict:
_A : Any = self.prepare_config_and_inputs()
_A , _A , _A , _A : Tuple = config_and_inputs
_A : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
__magic_name__ = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
__magic_name__ = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def _lowerCamelCase ( self ) -> List[str]:
_A : Dict = GPTNeoXJapaneseModelTester(self )
_A : Any = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def _lowerCamelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ) -> Tuple:
_A , _A , _A , _A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Dict:
_A , _A , _A , _A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Optional[int]:
# This regression test was failing with PyTorch < 1.3
_A , _A , _A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
_A : Tuple = None
self.model_tester.create_and_check_model_as_decoder(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Optional[int]:
_A , _A , _A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Tuple:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*UpperCAmelCase__ )
@slow
def _lowerCamelCase ( self ) -> Union[str, Any]:
_A : Union[str, Any] = '''abeja/gpt-neox-japanese-2.7b'''
_A : Optional[Any] = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
_A : List[str] = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
_A : int = GPTNeoXJapaneseTokenizer.from_pretrained(UpperCAmelCase__ )
_A : Union[str, Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(UpperCAmelCase__ )
_A : Union[str, Any] = []
for prompt in prompts:
_A : int = tokenizer(UpperCAmelCase__ , return_tensors='''pt''' ).input_ids
_A : Optional[int] = model.generate(UpperCAmelCase__ , max_length=5_0 )
_A : Optional[Any] = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
predicted_outputs += generated_string
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 417
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_text_dual_encoder'] = ['VisionTextDualEncoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_text_dual_encoder'] = ['FlaxVisionTextDualEncoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_text_dual_encoder'] = ['TFVisionTextDualEncoderModel']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
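# Editor's note: a minimal, self-contained sketch of the lazy-import pattern used above
# (`ToyLazyModule` is illustrative, not part of transformers). Importing the package stays
# cheap; a heavy submodule is only imported on first attribute access.
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported object name to the submodule that defines it
        self._object_to_module = {obj: mod for mod, objs in import_structure.items() for obj in objs}

    def __getattr__(self, attr):
        if attr not in self._object_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._object_to_module[attr]}", self.__name__)
        return getattr(submodule, attr)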
| 527
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
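# Editor's note (illustrative, not from the original script): PARAM_MAPPING rewrites fairseq
# adapter parameter names. A key such as "encoder.layers.3.adapter_layer.W_a" ends with "W_a",
# so its final segment is mapped to "linear_1.weight" before the tensor is copied over.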
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
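# Editor's note, hypothetical invocation (paths are placeholders, not from this script):
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned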
| 527
| 1
|
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet: expects one conditioning
    image and one conditioning scale per wrapped ControlNet.
    """

    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
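# Editor's note: a minimal usage sketch (the model ids below are illustrative):
#
#   from diffusers import ControlNetModel
#
#   canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#   multi = MultiControlNetModel([canny, pose])
#   # saves the first net to ./multi and the second to ./multi_1, matching from_pretrained above
#   multi.save_pretrained("./multi")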
| 714
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)
    def ids_to_clean_text(self, generated_ids):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)
    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)
    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )
    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
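# Editor's note, hypothetical invocation (paths and flags are placeholders; --model_name_or_path,
# --do_train and --do_predict are assumed to come from the generic args added in lightning_base):
#   python finetune.py --data_dir ./cnn_dm --output_dir ./runs/summ \
#       --model_name_or_path t5-small --do_train --do_predict --gpus 1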
| 167
| 0
|
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping in transformers PP only"""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
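# Editor's note (illustrative): for a shard file named "layer_05-model_00-model_states.pt",
# the regex extracts layer number 5; after the -3 offset for the embedding/norm blocks,
# a key like "self_attention.dense.weight" becomes "h.2.self_attention.dense.weight".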
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
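# Editor's note, hypothetical invocation (paths and TP degree are placeholders):
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path ./bloom-megatron-shards \
#       --pytorch_dump_folder_path ./bloom-hf \
#       --shard_model --pretraining_tp 4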
| 169
|
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(n: int) -> int:
    """Returns the sum of the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))
def solution() -> int:
    """Returns the sum of all numbers equal to the sum of the factorials of their digits."""
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(f'''{solution() = }''')
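# Editor's note on the search bound: an n-digit number is at least 10 ** (n - 1), while the
# largest possible digit-factorial sum is n * 9! = n * 362880. For n = 8 that sum is at most
# 2_903_040 < 10_000_000, so no number with 8 or more digits can equal its digit-factorial
# sum; 7 * 9! + 1 = 2_540_161 is therefore a safe upper limit for the search.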
| 365
| 0
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 346
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
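    # A minimal sketch of how this converter is typically invoked from the command
    # line; the checkpoint/config paths below are hypothetical placeholders:
    #
    #   python convert_bert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./bert_model.ckpt \
    #       --bert_config_file ./bert_config.json \
    #       --pytorch_dump_path ./pytorch_model.bin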
| 346
| 1
|
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """
    Split `code` into its indented blocks, starting at `indent_level`. If provided, splitting begins after
    `start_prompt` and stops at `end_prompt` (what comes before/after is returned as a first/last block).
    """
    # Let's split the code into lines and move to start_index.
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a `key` function (returning a string) to lowercase and ignore underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a string."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
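# Example of the ordering `sort_objects` produces (constants, then classes,
# then functions, each group sorted case-insensitively while ignoring
# underscores); the object names are made up for illustration:
#   sort_objects(["my_func", "MyClass", "MY_CONST", "OtherClass", "a_func"])
#   -> ["MY_CONST", "MyClass", "OtherClass", "a_func", "my_func"]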
def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
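# For instance, a single-line import entry would be rewritten like this
# (hypothetical input, assuming the regexes defined above):
#   sort_objects_in_import('"models.bert": ["BertModel", "BertConfig", "BERT_CONSTANT"]')
#   -> '"models.bert": ["BERT_CONSTANT", "BertConfig", "BertModel"]'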
def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`, and optionally check that it is properly sorted."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
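    # Typical invocations of this script (run from the repository root):
    #   python utils/custom_init_isort.py               # fix the __init__.py files in place
    #   python utils/custom_init_isort.py --check_only  # only report files that would change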
| 99
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """
    This is a general feature extraction class for speech recognition.
    """

    def __init__(self, feature_size, sampling_rate, padding_value, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(self, processed_features, padding=True, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None):
        """
        Pad input values / input vectors or a batch of them up to a predefined length or to the max sequence length
        in the batch.
        """
        # If we have a list of dicts, let's convert it in a dict of lists
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, truncation=truncation
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i], max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(self, processed_features, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None):
        """Pad inputs (on left/right and up to a predefined length or the max length in the batch)."""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(processed_features["attention_mask"], (0, difference))
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(processed_features["attention_mask"], (difference, 0))
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(self, processed_features, max_length=None, pad_to_multiple_of=None, truncation=None):
        """Truncate inputs to a predefined length or the max length in the batch."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        """Find the correct padding strategy."""
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
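# A minimal usage sketch of the padding API above. `DummyFeatureExtractor` is a
# hypothetical subclass introduced only for illustration:
#
#   class DummyFeatureExtractor(SequenceFeatureExtractor):
#       model_input_names = ["input_values"]
#
#   extractor = DummyFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   batch = extractor.pad(
#       {"input_values": [np.ones(800), np.ones(1000)]},
#       padding="longest",
#       return_tensors="np",
#   )
#   # batch["input_values"].shape == (2, 1000); the shorter example is right-padded with 0.0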
| 483
| 0
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
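# A short usage sketch for the pipeline exercised above (assumes a CUDA device
# and the same public checkpoint used in the slow tests):
#
#   pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b").to("cuda")
#   frames = pipe("Spiderman is surfing", num_inference_steps=25, output_type="pt").frames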
| 701
|
def perfect_cube(n: int) -> bool:
    """Check if a number is a perfect cube or not."""
    # Round the floating-point cube root before comparing to avoid precision errors
    # (e.g. 27 ** (1 / 3) evaluates to 3.0000000000000004).
    val = round(n ** (1 / 3))
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
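    # A couple of extra checks (shown as comments to keep the demo output
    # unchanged); the large value is why the cube root is rounded first:
    #   perfect_cube(125)    -> True
    #   perfect_cube(10**9)  -> True, since 1000**3 == 10**9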
| 443
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
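# A minimal sketch of using the processor directly (the image shape and values
# are arbitrary illustration data):
#
#   image_processor = DonutImageProcessor(size={"height": 18, "width": 20})
#   dummy_image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
#   pixel_values = image_processor(dummy_image, return_tensors="pt").pixel_values
#   # pixel_values.shape == (1, 3, 18, 20)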
| 545
|
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=128, max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 387
| 0
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
| 270
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
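# With the lazy module in place, the heavy submodules above are only imported
# when an attribute is first accessed; a sketch of what that looks like from
# user code (the top-level `transformers` package re-exports these names):
#
#   from transformers import Speech2TextConfig  # resolved lazily via _LazyModule
#   config = Speech2TextConfig()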
| 270
| 1
|
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using the pattern registered for it."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all example scripts."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
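    # Typical invocations (run from the repository root):
    #   python utils/release.py                  # prepare a minor release (e.g. 4.30.0)
    #   python utils/release.py --patch          # prepare a patch release (e.g. 4.30.1)
    #   python utils/release.py --post_release   # bump to the next dev version afterwards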
| 149
|
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Return the error (hypothesis minus actual output) for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Compute the hypothesis value theta_0 + sum_i theta_i * x_i for one input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Return the actual output for the given example."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Return the hypothesis value for the given example."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum the cost-function derivative terms over the training set.

    `index = -1` corresponds to the bias parameter theta_0."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Return the derivative of the cost function with respect to parameter `index + 1`."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
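# --- Illustrative sketch (not part of the original module): the same linear fit can be
# cross-checked in closed form with the normal equations; the names below are only for
# this demonstration. Gradient descent above should converge toward this solution.
import numpy as np

X = np.array([[1, *x] for x, _ in train_data], dtype=float)  # prepend a bias column
y = np.array([t for _, t in train_data], dtype=float)
theta, *_ = np.linalg.lstsq(X, y, rcond=None)  # plays the role of `parameter_vector`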
"""Feature extractor class for CLAP."""
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    r"""Constructs a CLAP feature extractor that turns raw audio into (possibly fused) log-mel spectrograms."""

    model_input_names = ["input_features", "is_longer"]
    def __init__(
        self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1024,
        padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000,
        top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value,
            return_attention_mask=return_attention_mask, **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # "htk"-scaled filters are used for the fusion path, "slaney" filters otherwise
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min,
            max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min,
            max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        """Serializes this instance to a dict; the large, easily recomputed mel filter banks are dropped."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        """Compute the log-mel spectrogram (in dB) of one waveform."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose an index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        """Truncate or pad one waveform, then extract its (possibly fused) mel spectrogram."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
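# --- Illustrative usage sketch (not part of the original module). The checkpoint name
# "laion/clap-htsat-unfused" is an assumption for demonstration:
#
#     from transformers import ClapFeatureExtractor
#     import numpy as np
#
#     extractor = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
#     audio = np.zeros(48_000 * 3)  # 3 seconds of silence at 48 kHz
#     features = extractor(audio, sampling_rate=48_000, return_tensors="pt")
#     print(features["input_features"].shape, features["is_longer"])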
"""Conversion script for the Music Spectrogram Diffusion checkpoints."""
import argparse
import os

import jax as jnp  # only `jax.tree_util` is used below
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    # Target attribute paths below follow the upstream diffusers conversion script.
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_continuous_encoder(weights, model):
    # Target attribute paths below follow the upstream diffusers conversion script.
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_decoder(weights, model):
    # Target attribute paths below follow the upstream diffusers conversion script; the
    # FiLM sub-module names (`FiLMLayer`, `film`) in particular are taken from that source.
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))

    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder,
        scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()

    main(args)
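# --- Illustrative usage sketch (not part of the original script): once converted, the
# pipeline loads like any other diffusers pipeline. The MIDI-handling details below are
# assumptions for demonstration, not taken from this script:
#
#     from diffusers import SpectrogramDiffusionPipeline
#
#     pipe = SpectrogramDiffusionPipeline.from_pretrained("path/to/converted/model")
#     output = pipe(processed_midi_tokens)  # note sequences from a MIDI processor
#     audio = output.audios[0]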
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
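# --- Illustrative note (not part of the original module): with the `_LazyModule` pattern
# above, heavy submodules are only imported when an attribute is first accessed, e.g.
#
#     from transformers.models.longformer import LongformerConfig  # cheap: config only
#     from transformers.models.longformer import LongformerModel   # triggers the torch import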
"""Scrape mobile-app-development job postings for a given location from Indeed."""
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
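# --- Illustrative sketch (not part of the original script): since `fetch_jobs` is a
# generator, it composes with itertools; e.g. to look at only the first five postings:
#
#     from itertools import islice
#
#     for title, company in islice(fetch_jobs("pune"), 5):
#         print(title, "-", company)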
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train",
        n_obs=None, src_lang=None, tgt_lang=None, prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
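# --- Illustrative sketch (not part of the original module) showing the metric helpers
# on toy strings; the F1 of 0.5 comes from one shared token out of two on each side:
#
#     assert exact_match_score("The Cat!", "the cat")
#     assert calculate_exact_match(["a b"], ["a c"]) == {"em": 0.0}
#     assert abs(f1_score("a b", "a c") - 0.5) < 1e-9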
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
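# --- Illustrative usage sketch (not part of the original module): wiring the trainer
# together. The argument objects below are assumptions for demonstration; in the
# legacy examples they come from the accompanying `Seq2SeqTrainingArguments`/`DataTrainingArguments`:
#
#     from transformers import AutoModelForSeq2SeqLM
#
#     model = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-base")
#     trainer = Seq2SeqTrainer(
#         model=model, args=training_args, data_args=data_args, train_dataset=train_dataset
#     )
#     trainer.train()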
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by checking the last bit and shifting right."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark code for comparing the two functions, with several int values."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
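# --- Illustrative note (not part of the original module): on Python 3.10+ the built-in
# `int.bit_count()` computes the same popcount, much faster than either loop above:
#
#     assert (37).bit_count() == get_set_bits_count_using_brian_kernighans_algorithm(37) == 3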
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the current player at `node_index`, `depth` levels down the game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
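# --- Illustrative worked example (not part of the original module): with four leaves
# [3, 5, 2, 9] and height log2(4) = 2, the minimizer one level down sees min(3, 5) = 3
# and min(2, 9) = 2, so the maximizing root picks max(3, 2) = 3:
assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3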
"""
PyTest's testing of the digital image processing algorithms.
"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
import importlib
import os

import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry

from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem

from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
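# --- Illustrative example (not part of the original module): round-tripping the classic
# Vigenère demo, where key "LEMON" maps "ATTACKATDAWN" to "LXFOPVEFRNHR":
assert encrypt_message("LEMON", "ATTACKATDAWN") == "LXFOPVEFRNHR"
assert decrypt_message("LEMON", "LXFOPVEFRNHR") == "ATTACKATDAWN"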
"""Convert GLPN checkpoints."""
import argparse
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


# We will verify our results on a COCO image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Copy/paste/tweak the original model's weights to our GLPN structure."""

    # define GLPN configuration (Segformer-B4 size)
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
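
# Example invocation (paths are placeholders):
#   python convert_glpn_to_pytorch.py --checkpoint_path /path/to/glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti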
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
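

# Minimal usage sketch (added for illustration; it only exercises the classes defined
# above, so running this module as a script is harmless):
if __name__ == "__main__":
    config = RoFormerConfig()  # defaults: vocab_size=50000, hidden_size=768, 12 layers/heads
    onnx_config = RoFormerOnnxConfig(config)
    print(onnx_config.inputs)  # OrderedDict of dynamic ONNX axes for the three model inputs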
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
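
# Note (explanatory, not from the original script): h_output0/h_output1 must live in
# page-locked host memory (allocated later via cuda.pagelocked_empty) so that the
# asynchronous device-to-host copies above can actually overlap work on the CUDA stream.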
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
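
# Worked example (illustrative): with max_length=384 and doc_stride=128, one long SQuAD
# context can produce several overlapping features; overflow_to_sample_mapping then
# repeats the originating example index (e.g. [0, 0, 1]), which is how example_id above
# ties every feature back to its source example when aggregating predictions.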
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
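
    # Worked example (illustrative numbers): with shortest_edge=18, a 30x60 (width x
    # height) image resizes to 18x36 -- the short side is pinned to 18 and the aspect
    # ratio is preserved; in the batched case the per-image maxima are returned.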
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, specifically avoiding
    whitespace/control characters that the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
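
# Sanity check (illustrative): every byte maps to a printable character; the space
# byte (32) maps to the familiar "Ġ" marker, i.e. bytes_to_unicode()[ord(" ")] == "Ġ".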
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
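
    # Worked example (hypothetical merge table): if bpe_ranks were {("l", "o"): 0,
    # ("lo", "w"): 1}, then bpe("low") would first merge to ("lo", "w"), then to
    # ("low",), and return "low" as a single token.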
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
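
    # Illustrative example (made-up values): with right padding and difference == 2,
    # a global_attention_mask of [0, 1, 0] becomes [0, 1, 0, -1, -1]; the -1 entries
    # mark padded positions that should not be attended to at all.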
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
__magic_name__ = parser.parse_args()
__magic_name__ = strabool(args.class_cond)
__magic_name__ = os.path.basename(args.unet_path)
print(F'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
__magic_name__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__magic_name__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__magic_name__ = TEST_UNET_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
__magic_name__ = None
__magic_name__ = con_pt_to_diffuser(args.unet_path, unet_config)
__magic_name__ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__magic_name__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__magic_name__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__magic_name__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
__magic_name__ = CMStochasticIterativeScheduler(**scheduler_config)
__magic_name__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
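
# Example invocation (file name is a placeholder; it must contain a pattern the config
# lookup above recognizes, e.g. "imagenet64" plus "cd"/"ct"):
#   python convert_consistency_to_diffusers.py --unet_path cd_imagenet64_l2.pt --dump_path ./cm-imagenet64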
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
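# Example invocation (added; the script filename and checkpoint paths are
# placeholders for real TF BigBird artifacts):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path bigbird/model.ckpt \
#     --big_bird_config_file bigbird/config.json \
#     --pytorch_dump_path ./bigbird-pytorch \
#     --is_trivia_qa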
| 258
| 0
|
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
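# Added demo (not in the original module): exercise the three estimators above
# with a modest sample count; outputs are stochastic by construction.
if __name__ == "__main__":
    pi_estimator(10_000)
    area_under_line_estimator_check(10_000)
    pi_estimator_using_area_under_curve(10_000)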
| 325
|
"""simple docstring"""
def solution(limit: int = 1000) -> int:
    """simple docstring"""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f"""{solution() = }""")
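# Added worked check: below 10 the multiples of 3 or 5 are 3, 5, 6 and 9,
# which sum to 23 - the classic Project Euler #1 sanity case.
#
#   >>> solution(10)
#   23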
| 470
| 0
|
'''simple docstring'''
import os
def solution() -> str:
    """simple docstring"""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 10
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a : str = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = ["ViTFeatureExtractor"]
_a : Dict = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
_a : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10
| 1
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module) -> None:
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    '''simple docstring'''
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations.")
    return device


def show_image(image) -> None:
    '''simple docstring'''
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 48
|
'''simple docstring'''
from math import log2


def lowest_set_bit_position(number: int) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError('Input value must be a positive integer')
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (number == 0) else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
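# Added worked example: 36 == 0b100100, `36 & -36` isolates the lowest set bit
# (value 4), and log2(4) gives its position:
#
#   >>> lowest_set_bit_position(36)
#   2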
| 390
| 0
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        '''simple docstring'''
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        '''simple docstring'''
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        '''simple docstring'''
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        '''simple docstring'''
        return cls(**config)

    def get_config(self):
        '''simple docstring'''
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        '''simple docstring'''
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
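# Added usage sketch (the checkpoint id is the standard public GPT-2 model):
#
#   tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#   out = tf_tokenizer(tf.constant(["hello world"]))
#   out["input_ids"], out["attention_mask"]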
| 717
|
'''simple docstring'''
import datasets
from .evaluate import evaluate
__UpperCamelCase : int = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
__UpperCamelCase : int = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
__UpperCamelCase : str = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
    def _compute(self, predictions, references):
        '''simple docstring'''
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 270
| 0
|
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 44
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
__lowerCAmelCase = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 1_3_1_0_7_2,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
__lowerCAmelCase = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
__lowerCAmelCase = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
__lowerCAmelCase = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
__lowerCAmelCase = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
__lowerCAmelCase = parser.parse_args()
main(args)
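# Example invocation (added; the script filename is assumed, while `gwf-440k`
# is one of the official names in MODELS_MAP above, so the checkpoint is
# downloaded automatically):
#
#   python convert_dance_diffusion_to_diffusers.py \
#     --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers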
| 684
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': '.',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 1.0,
            'sag_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        '''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        '''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        '''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np", )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 706
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : str = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "big_bird"

    def __init__(self, vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
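# Added usage sketch: the defaults above reproduce google/bigbird-roberta-base;
# switching the sparse attention off is a single keyword:
#
#   config = BigBirdConfig(attention_type="original_full")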
| 85
| 0
|
'''simple docstring'''
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = 'microsoft/speecht5_hifigan'
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors='pt', truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('Datasets needs to be installed if not passing speaker embeddings.')

            embeddings_dataset = load_dataset('Matthijs/cmu-arctic-xvectors', split='validation')
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]['xvector']).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
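# Added usage sketch: agent tools are callable objects; this would synthesize
# speech with the default x-vector speaker embedding fetched in `encode`.
#
#   tool = TextToSpeechTool()
#   tool.setup()
#   waveform = tool("Hello world")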
| 697
|
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowercase =logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney', ).T

    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        '''simple docstring'''
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, 'hann'), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(self, raw_speech, return_tensors=None, return_attention_mask=True, sampling_rate=None, resample=False, mask_audio=False, **kwargs, ) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features])  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
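# Added usage sketch: feed a mono float waveform sampled at 44100 Hz.
#
#   import numpy as np
#   extractor = TvltFeatureExtractor()
#   batch = extractor(np.zeros(44_100, dtype=np.float32), sampling_rate=44_100, return_tensors="np")
#   batch["audio_values"].shape, batch["audio_mask"].shape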
| 446
| 0
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__lowerCAmelCase = HfArgumentParser(InitializationArguments)
__lowerCAmelCase = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__lowerCAmelCase = {
"vocab_size": len(tokenizer),
"scale_attn_by_inverse_layer_idx": True,
"reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
__lowerCAmelCase = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__lowerCAmelCase = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
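# Example invocation (added; script filename assumed, flags come from the
# InitializationArguments dataclass referenced above):
#
#   python initialize_model.py --config_name gpt2-large \
#     --tokenizer_name codeparrot/codeparrot --model_name codeparrot-model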
| 718
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = 'upernet'

    def __init__(self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
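# Added usage sketch: with no backbone_config a default ResNet backbone is
# created, and to_dict() re-serializes it alongside the model_type.
#
#   config = UperNetConfig()
#   assert config.to_dict()["model_type"] == "upernet"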
| 335
| 0
|
"""simple docstring"""
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
__lowerCamelCase = float('nan')
class Tee:
    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)


def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def process_run(id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose, ):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose)
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}


def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n'


def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0, axis="columns", )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd", )
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'", )
    parser.add_argument(
        "--base-variation", default=None, type=str, help="Baseline variation to compare to. if None the minimal target value will be used to compare against", )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second", )
    parser.add_argument(
        "--report-metric-keys", default="", type=str, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples", )
    parser.add_argument(
        "--repeat-times", default=1, type=int, help="How many times to re-run each variation - an average will be reported", )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked", )
    parser.add_argument(
        "--verbose", default=False, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress", )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}.txt'
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1, cmd, variation_key, variation, longest_variation_len, args.target_metric_key, report_metric_keys, args.repeat_times, output_dir, args.verbose, ))

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 96
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_demonimator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
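# Usage sketch for lamberts_ellipsoidal_distance (approximate coordinates for
# San Francisco and Vancouver; the result should come out around 1.27e6 meters,
# an order-of-magnitude check rather than an exact expected value):
#
#     lamberts_ellipsoidal_distance(37.774856, -122.424227, 49.262827, -123.119222)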
| 680
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
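# A small sanity sketch (assuming the cleaned-up NatConfig above):
#
#     config = NatConfig()
#     config.num_layers   # 4, i.e. len(depths)
#     config.hidden_size  # 512 == 64 * 2 ** 3, channel dim after the last stage
#     config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']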
| 275
|
"""simple docstring"""
import datasets
UpperCAmelCase__ = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
UpperCAmelCase__ = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
UpperCAmelCase__ = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds, labels):
    """simple docstring"""
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
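# simple_accuracy relies on numpy semantics: `preds == labels` must compare
# element-wise and expose .mean(), so the inputs are numpy arrays, not plain
# Python lists. A minimal illustration:
#
#     import numpy as np
#     simple_accuracy(np.array([0, 1, 2, 1]), np.array([0, 1, 1, 1]))  # 0.75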
| 275
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = AltDiffusionPipeline
A_ = TEXT_TO_IMAGE_PARAMS
A_ = TEXT_TO_IMAGE_BATCH_PARAMS
A_ = TEXT_TO_IMAGE_IMAGE_PARAMS
A_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def _UpperCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
UpperCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
UpperCamelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
torch.manual_seed(0 )
UpperCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
UpperCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
UpperCamelCase_ = CLIPTextModel(_UpperCAmelCase )
UpperCamelCase_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
UpperCamelCase_ = 77
UpperCamelCase_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=0 ) -> Optional[Any]:
if str(_UpperCAmelCase ).startswith('mps' ):
UpperCamelCase_ = torch.manual_seed(_UpperCAmelCase )
else:
UpperCamelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
UpperCamelCase_ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _UpperCAmelCase ( self ) -> Tuple:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def _UpperCAmelCase ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ = self.get_dummy_components()
torch.manual_seed(0 )
UpperCamelCase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCamelCase_ = RobertaSeriesModelWithTransformation(_UpperCAmelCase )
UpperCamelCase_ = text_encoder
UpperCamelCase_ = AltDiffusionPipeline(**_UpperCAmelCase )
UpperCamelCase_ = alt_pipe.to(_UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCamelCase_ = self.get_dummy_inputs(_UpperCAmelCase )
UpperCamelCase_ = 'A photo of an astronaut'
UpperCamelCase_ = alt_pipe(**_UpperCAmelCase )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_ = np.array(
[0.5_7_4_8_1_6_2, 0.6_0_4_4_7_1_4_5, 0.4_8_8_2_1_2_1_7, 0.5_0_1_0_0_6_3_6, 0.5_4_3_1_1_8_5, 0.4_5_7_6_3_6_8_3, 0.4_9_6_5_7_6_9_6, 0.4_8_1_3_2_7_3_3, 0.4_7_5_7_3_0_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
torch.manual_seed(0 )
UpperCamelCase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCamelCase_ = RobertaSeriesModelWithTransformation(_UpperCAmelCase )
UpperCamelCase_ = text_encoder
UpperCamelCase_ = AltDiffusionPipeline(**_UpperCAmelCase )
UpperCamelCase_ = alt_pipe.to(_UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCamelCase_ = self.get_dummy_inputs(_UpperCAmelCase )
UpperCamelCase_ = alt_pipe(**_UpperCAmelCase )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_ = np.array(
[0.5_1_6_0_5_0_9_3, 0.5_7_0_7_2_4_1, 0.4_7_3_6_5_5_0_7, 0.5_0_5_7_8_8_8_6, 0.5_6_3_3_8_7_7, 0.4_6_4_2_5_0_3, 0.5_1_8_2_0_8_1, 0.4_8_7_6_3_4_8_4, 0.4_9_0_8_4_2_3_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> List[Any]:
# make sure here that pndm scheduler skips prk
UpperCamelCase_ = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=_UpperCAmelCase )
UpperCamelCase_ = alt_pipe.to(_UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCamelCase_ = 'A painting of a squirrel eating a burger'
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = alt_pipe([prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=20 , output_type='np' )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array([0.1_0_1_0, 0.0_8_0_0, 0.0_7_9_4, 0.0_8_8_5, 0.0_8_4_3, 0.0_7_6_2, 0.0_7_6_9, 0.0_7_2_9, 0.0_5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' )
UpperCamelCase_ = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
UpperCamelCase_ = alt_pipe.to(_UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCamelCase_ = 'A painting of a squirrel eating a burger'
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = alt_pipe([prompt] , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='numpy' )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array([0.4_0_1_9, 0.4_0_5_2, 0.3_8_1_0, 0.4_1_1_9, 0.3_9_1_6, 0.3_9_8_2, 0.4_6_5_1, 0.4_1_9_5, 0.5_3_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 23
|
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
@slow
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
UpperCamelCase_ = BertTokenizer.from_pretrained('bert-base-uncased' )
UpperCamelCase_ = bertabert.config.encoder.vocab_size
UpperCamelCase_ = tokenizer.sep_token_id
UpperCamelCase_ = tokenizer.cls_token_id
UpperCamelCase_ = 128
UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
UpperCamelCase_ = train_dataset.select(range(32 ) )
UpperCamelCase_ = val_dataset.select(range(16 ) )
UpperCamelCase_ = 4
def _map_to_encoder_decoder_inputs(_UpperCAmelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
UpperCamelCase_ = tokenizer(batch['article'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=512 )
UpperCamelCase_ = tokenizer(batch['highlights'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=128 )
UpperCamelCase_ = inputs.input_ids
UpperCamelCase_ = inputs.attention_mask
UpperCamelCase_ = outputs.input_ids
UpperCamelCase_ = outputs.input_ids.copy()
UpperCamelCase_ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
UpperCamelCase_ = outputs.attention_mask
assert all(len(_UpperCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(_UpperCAmelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_UpperCAmelCase ):
UpperCamelCase_ = pred.label_ids
UpperCamelCase_ = pred.predictions
# all unnecessary tokens are removed
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCamelCase_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_UpperCAmelCase ) )] ) / len(_UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
UpperCamelCase_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
UpperCamelCase_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = SeqaSeqTrainingArguments(
output_dir=_UpperCAmelCase , per_device_train_batch_size=_UpperCAmelCase , per_device_eval_batch_size=_UpperCAmelCase , predict_with_generate=_UpperCAmelCase , evaluation_strategy='steps' , do_train=_UpperCAmelCase , do_eval=_UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
UpperCamelCase_ = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
# start training
trainer.train()
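# The -100 masking inside _map_to_encoder_decoder_inputs above, shown in
# isolation: label positions holding the pad token are replaced with -100,
# the index PyTorch's cross-entropy loss ignores (toy ids for illustration):
#
#     pad_token_id = 0
#     labels = [[5, 7, 9, 0, 0]]
#     [[-100 if t == pad_token_id else t for t in seq] for seq in labels]
#     # [[5, 7, 9, -100, -100]]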
| 23
| 1
|
"""simple docstring"""
def prefix_function(input_string: str) -> list:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_str: str) -> int:
    return max(prefix_function(input_str))
if __name__ == "__main__":
import doctest
doctest.testmod()
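# Worked example: prefix_function computes the failure table used by
# Knuth-Morris-Pratt matching; each entry is the length of the longest proper
# prefix that is also a suffix of the text ending at that position:
#
#     prefix_function("aabcdaabc")  # [0, 1, 0, 0, 0, 1, 2, 3, 4]
#     longest_prefix("aabcdaabc")   # 4 -> the prefix "aabc" recurs at the end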
| 422
|
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count
    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
a : Tuple = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
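# Cross-check sketch with networkx (a hypothetical extra dependency, not used
# by this script). For the graph above the articulation points are 2
# (separates {0, 1} from the rest), 3 (isolates 4) and 5 (cuts off {6, 7, 8}):
#
#     import networkx as nx
#     sorted(nx.articulation_points(nx.Graph(data)))  # [2, 3, 5]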
| 422
| 1
|
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
_snake_case = parser.parse_args()
_snake_case = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'{args.num} is probably prime')
else:
_snake_case = args.num // divisor
print(f'{args.num} = {divisor} * {quotient}')
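# Usage sketch (assuming the cleaned-up signature above):
#
#     pollard_rho(100)   # 2 -- even inputs short-circuit before the main loop
#     pollard_rho(8051)  # a nontrivial factor, 83 or 97 (8051 == 83 * 97)
#     pollard_rho(7)     # None -- 7 is prime, so every attempt fails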
| 580
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 511
| 0
|
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __A ( SCREAMING_SNAKE_CASE_ ):
def __get__( self : int , __snake_case : Dict , __snake_case : Optional[Any]=None ) -> int:
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("""unreadable attribute""" )
__magic_name__: str = """__cached_""" + self.fget.__name__
__magic_name__: Optional[int] = getattr(__snake_case , __snake_case , __snake_case )
if cached is None:
__magic_name__: str = self.fget(__snake_case )
setattr(__snake_case , __snake_case , __snake_case )
return cached
def a ( __UpperCAmelCase : List[Any] ) -> Any:
__magic_name__: Tuple = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'invalid truth value {val!r}' )
def a ( __UpperCAmelCase : Tuple ) -> List[str]:
if is_torch_fx_proxy(__UpperCAmelCase ):
return True
if is_torch_available():
import torch
if isinstance(__UpperCAmelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(__UpperCAmelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(__UpperCAmelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(__UpperCAmelCase , np.ndarray )
def a ( __UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
return isinstance(__UpperCAmelCase , np.ndarray )
def a ( __UpperCAmelCase : Optional[int] ) -> List[Any]:
return _is_numpy(__UpperCAmelCase )
def a ( __UpperCAmelCase : List[str] ) -> Any:
import torch
return isinstance(__UpperCAmelCase , torch.Tensor )
def a ( __UpperCAmelCase : int ) -> Dict:
return False if not is_torch_available() else _is_torch(__UpperCAmelCase )
def a ( __UpperCAmelCase : Optional[int] ) -> Optional[int]:
import torch
return isinstance(__UpperCAmelCase , torch.device )
def a ( __UpperCAmelCase : Any ) -> Tuple:
return False if not is_torch_available() else _is_torch_device(__UpperCAmelCase )
def a ( __UpperCAmelCase : Any ) -> Tuple:
import torch
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
if hasattr(__UpperCAmelCase , __UpperCAmelCase ):
__magic_name__: int = getattr(__UpperCAmelCase , __UpperCAmelCase )
else:
return False
return isinstance(__UpperCAmelCase , torch.dtype )
def a ( __UpperCAmelCase : Dict ) -> Optional[Any]:
return False if not is_torch_available() else _is_torch_dtype(__UpperCAmelCase )
def a ( __UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
import tensorflow as tf
return isinstance(__UpperCAmelCase , tf.Tensor )
def a ( __UpperCAmelCase : Union[str, Any] ) -> Tuple:
return False if not is_tf_available() else _is_tensorflow(__UpperCAmelCase )
def a ( __UpperCAmelCase : int ) -> Tuple:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(__UpperCAmelCase , """is_symbolic_tensor""" ):
return tf.is_symbolic_tensor(__UpperCAmelCase )
return type(__UpperCAmelCase ) == tf.Tensor
def a ( __UpperCAmelCase : int ) -> Dict:
return False if not is_tf_available() else _is_tf_symbolic_tensor(__UpperCAmelCase )
def a ( __UpperCAmelCase : Dict ) -> Union[str, Any]:
import jax.numpy as jnp # noqa: F811
return isinstance(__UpperCAmelCase , jnp.ndarray )
def a ( __UpperCAmelCase : Any ) -> List[str]:
return False if not is_flax_available() else _is_jax(__UpperCAmelCase )
def a ( __UpperCAmelCase : List[Any] ) -> Dict:
if isinstance(__UpperCAmelCase , (dict, UserDict) ):
return {k: to_py_obj(__UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(__UpperCAmelCase , (list, tuple) ):
return [to_py_obj(__UpperCAmelCase ) for o in obj]
elif is_tf_tensor(__UpperCAmelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(__UpperCAmelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(__UpperCAmelCase ):
return np.asarray(__UpperCAmelCase ).tolist()
elif isinstance(__UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def a ( __UpperCAmelCase : Dict ) -> str:
if isinstance(__UpperCAmelCase , (dict, UserDict) ):
return {k: to_numpy(__UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(__UpperCAmelCase , (list, tuple) ):
return np.array(__UpperCAmelCase )
elif is_tf_tensor(__UpperCAmelCase ):
return obj.numpy()
elif is_torch_tensor(__UpperCAmelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(__UpperCAmelCase ):
return np.asarray(__UpperCAmelCase )
else:
return obj
class __A ( SCREAMING_SNAKE_CASE_ ):
def lowerCamelCase__ ( self : List[str] ) -> str:
__magic_name__: Union[str, Any] = fields(self )
# Safety and consistency checks
if not len(__snake_case ):
raise ValueError(F'{self.__class__.__name__} has no fields.' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'{self.__class__.__name__} should not have more than one required field.' )
__magic_name__: Tuple = getattr(self , class_fields[0].name )
__magic_name__: int = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__snake_case ):
if isinstance(__snake_case , __snake_case ):
__magic_name__: Optional[Any] = first_field.items()
__magic_name__: Union[str, Any] = True
else:
try:
__magic_name__: Tuple = iter(__snake_case )
__magic_name__: int = True
except TypeError:
__magic_name__: int = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__snake_case ):
if (
not isinstance(__snake_case , (list, tuple) )
or not len(__snake_case ) == 2
or not isinstance(element[0] , __snake_case )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
__magic_name__: Optional[Any] = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'Cannot set key/value for {element}. It needs to be a tuple (key, value).' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
__magic_name__: Union[str, Any] = element[1]
elif first_field is not None:
__magic_name__: Optional[Any] = first_field
else:
for field in class_fields:
__magic_name__: List[str] = getattr(self , field.name )
if v is not None:
__magic_name__: List[str] = v
def __delitem__( self : Optional[Any] , *__snake_case : Union[str, Any] , **__snake_case : List[str] ) -> int:
raise Exception(F'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' )
def lowerCamelCase__ ( self : int , *__snake_case : Union[str, Any] , **__snake_case : Optional[Any] ) -> Union[str, Any]:
raise Exception(F'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' )
def lowerCamelCase__ ( self : Optional[Any] , *__snake_case : Optional[Any] , **__snake_case : str ) -> List[str]:
raise Exception(F'You cannot use ``pop`` on a {self.__class__.__name__} instance.' )
def lowerCamelCase__ ( self : int , *__snake_case : Dict , **__snake_case : Optional[int] ) -> Tuple:
raise Exception(F'You cannot use ``update`` on a {self.__class__.__name__} instance.' )
def __getitem__( self : Tuple , __snake_case : Optional[Any] ) -> List[str]:
if isinstance(__snake_case , __snake_case ):
__magic_name__: str = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Union[str, Any] , __snake_case : str , __snake_case : Any ) -> List[str]:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__snake_case , __snake_case )
super().__setattr__(__snake_case , __snake_case )
def __setitem__( self : Any , __snake_case : Optional[int] , __snake_case : Optional[int] ) -> Optional[Any]:
# Will raise a KeyException if needed
super().__setitem__(__snake_case , __snake_case )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__snake_case , __snake_case )
def lowerCamelCase__ ( self : str ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
class __A ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
@classmethod
def lowerCamelCase__ ( cls : Optional[int] , __snake_case : int ) -> List[str]:
raise ValueError(
F'{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}' )
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = "longest"
UpperCAmelCase__ = "max_length"
UpperCAmelCase__ = "do_not_pad"
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = "pt"
UpperCAmelCase__ = "tf"
UpperCAmelCase__ = "np"
UpperCAmelCase__ = "jax"
class __A :
def __init__( self : List[Any] , __snake_case : List[ContextManager] ) -> int:
__magic_name__: Dict = context_managers
__magic_name__: Union[str, Any] = ExitStack()
def __enter__( self : int ) -> Dict:
for context_manager in self.context_managers:
self.stack.enter_context(__snake_case )
def __exit__( self : Union[str, Any] , *__snake_case : int , **__snake_case : Optional[int] ) -> List[str]:
self.stack.__exit__(*__snake_case , **__snake_case )
def a ( __UpperCAmelCase : Union[str, Any] ) -> List[Any]:
__magic_name__: str = infer_framework(__UpperCAmelCase )
if framework == "tf":
__magic_name__: List[Any] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__magic_name__: Optional[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
__magic_name__: List[Any] = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a ( __UpperCAmelCase : Optional[int] ) -> List[Any]:
__magic_name__: Optional[Any] = model_class.__name__
__magic_name__: Union[str, Any] = infer_framework(__UpperCAmelCase )
if framework == "tf":
__magic_name__: Optional[int] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__magic_name__: Optional[int] = inspect.signature(model_class.forward ) # PyTorch models
else:
__magic_name__: Any = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v
    return dict(_flatten_dict(d, parent_key, delimiter))
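# Example for flatten_dict above: nested keys are joined with the delimiter.
#
#     flatten_dict({"a": {"b": 1, "c": {"d": 2}}, "e": 3})
#     # {'a.b': 1, 'a.c.d': 2, 'e': 3}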
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def a ( __UpperCAmelCase : Any , __UpperCAmelCase : List[str]=None ) -> List[str]:
if is_numpy_array(__UpperCAmelCase ):
return np.transpose(__UpperCAmelCase , axes=__UpperCAmelCase )
elif is_torch_tensor(__UpperCAmelCase ):
return array.T if axes is None else array.permute(*__UpperCAmelCase )
elif is_tf_tensor(__UpperCAmelCase ):
import tensorflow as tf
return tf.transpose(__UpperCAmelCase , perm=__UpperCAmelCase )
elif is_jax_tensor(__UpperCAmelCase ):
return jnp.transpose(__UpperCAmelCase , axes=__UpperCAmelCase )
else:
raise ValueError(f'Type not supported for transpose: {type(__UpperCAmelCase )}.' )
def a ( __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] ) -> Tuple:
if is_numpy_array(__UpperCAmelCase ):
return np.reshape(__UpperCAmelCase , __UpperCAmelCase )
elif is_torch_tensor(__UpperCAmelCase ):
return array.reshape(*__UpperCAmelCase )
elif is_tf_tensor(__UpperCAmelCase ):
import tensorflow as tf
return tf.reshape(__UpperCAmelCase , __UpperCAmelCase )
elif is_jax_tensor(__UpperCAmelCase ):
return jnp.reshape(__UpperCAmelCase , __UpperCAmelCase )
else:
raise ValueError(f'Type not supported for reshape: {type(__UpperCAmelCase )}.' )
def a ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict=None ) -> Optional[int]:
if is_numpy_array(__UpperCAmelCase ):
return np.squeeze(__UpperCAmelCase , axis=__UpperCAmelCase )
elif is_torch_tensor(__UpperCAmelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=__UpperCAmelCase )
elif is_tf_tensor(__UpperCAmelCase ):
import tensorflow as tf
return tf.squeeze(__UpperCAmelCase , axis=__UpperCAmelCase )
elif is_jax_tensor(__UpperCAmelCase ):
return jnp.squeeze(__UpperCAmelCase , axis=__UpperCAmelCase )
else:
raise ValueError(f'Type not supported for squeeze: {type(__UpperCAmelCase )}.' )
def a ( __UpperCAmelCase : str , __UpperCAmelCase : int ) -> Tuple:
if is_numpy_array(__UpperCAmelCase ):
return np.expand_dims(__UpperCAmelCase , __UpperCAmelCase )
elif is_torch_tensor(__UpperCAmelCase ):
return array.unsqueeze(dim=__UpperCAmelCase )
elif is_tf_tensor(__UpperCAmelCase ):
import tensorflow as tf
return tf.expand_dims(__UpperCAmelCase , axis=__UpperCAmelCase )
elif is_jax_tensor(__UpperCAmelCase ):
return jnp.expand_dims(__UpperCAmelCase , axis=__UpperCAmelCase )
else:
raise ValueError(f'Type not supported for expand_dims: {type(__UpperCAmelCase )}.' )
def a ( __UpperCAmelCase : Optional[Any] ) -> Tuple:
if is_numpy_array(__UpperCAmelCase ):
return np.size(__UpperCAmelCase )
elif is_torch_tensor(__UpperCAmelCase ):
return array.numel()
elif is_tf_tensor(__UpperCAmelCase ):
import tensorflow as tf
return tf.size(__UpperCAmelCase )
elif is_jax_tensor(__UpperCAmelCase ):
return array.size
else:
raise ValueError(f'Type not supported for expand_dims: {type(__UpperCAmelCase )}.' )
def a ( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ) -> Tuple:
for key, value in auto_map.items():
if isinstance(__UpperCAmelCase , (tuple, list) ):
__magic_name__: Optional[int] = [f'{repo_id}--{v}' if (v is not None and """--""" not in v) else v for v in value]
elif value is not None and "--" not in value:
__magic_name__: Any = f'{repo_id}--{value}'
return auto_map
def a ( __UpperCAmelCase : Optional[int] ) -> List[Any]:
for base_class in inspect.getmro(__UpperCAmelCase ):
__magic_name__: Dict = base_class.__module__
__magic_name__: Optional[int] = base_class.__name__
if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("""torch""" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'Could not infer framework from class {model_class}.' )
| 213
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__lowerCamelCase = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
__magic_name__: Optional[int] = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_3_5_1_5_6_3,
"""num_examples""": 1_0_0_0_0,
},
{
"""name""": """validation""",
"""num_bytes""": 2_3_8_4_1_8,
"""num_examples""": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 213
| 1
|
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """simple docstring"""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
__snake_case = input('''Enter Video/IGTV url: ''').strip()
__snake_case = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
def shape_list(tensor):
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits, axis=None, name=None):
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
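# The 1e-9 shift in stable_softmax leaves the result mathematically unchanged
# (softmax is invariant under adding a constant to all logits); upstream it is
# described as a workaround for an XLA-on-CPU issue in TensorFlow. Quick check:
#
#     logits = tf.constant([[1.0, 2.0, 3.0]])
#     tf.reduce_max(tf.abs(tf.nn.softmax(logits) - stable_softmax(logits)))  # ~0.0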
def a_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=1e-5 , UpperCamelCase_=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )
# Get mean and variance on the axis to be normalized
A_ , A_ = tf.nn.moments(UpperCamelCase_ , axes=[axis] , keepdims=UpperCamelCase_ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
A_ = [1] * inputs.shape.rank
A_ = shape_list(UpperCamelCase_ )[axis]
A_ = tf.reshape(UpperCamelCase_ , UpperCamelCase_ )
A_ = tf.reshape(UpperCamelCase_ , UpperCamelCase_ )
# Compute layer normalization using the batch_normalization
# function.
A_ = tf.nn.batch_normalization(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , offset=UpperCamelCase_ , scale=UpperCamelCase_ , variance_epsilon=UpperCamelCase_ , )
return outputs
def a_ ( UpperCamelCase_ , UpperCamelCase_=0 , UpperCamelCase_=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
A_ = tf.shape(UpperCamelCase_ )
A_ = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
A_ = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(UpperCamelCase_ , UpperCamelCase_ )
def a_ ( UpperCamelCase_ ):
if not isinstance(UpperCamelCase_ , tf.Tensor ):
A_ = tf.convert_to_tensor(UpperCamelCase_ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
A_ = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
A_ = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
A_ = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def a_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = "input_ids" ):
tf.debugging.assert_less(
UpperCamelCase_ , tf.cast(UpperCamelCase_ , dtype=tensor.dtype ) , message=(
f"The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCamelCase_ )}) must be smaller than the embedding "
f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
) , )
def a_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
A_ = 6_4_5_1_2
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
A_ = [x for x in data if len(UpperCamelCase_ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"The following attributes cannot be saved to HDF5 file because "
f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
f"bytes: {bad_attributes}" )
A_ = np.asarray(UpperCamelCase_ )
A_ = 1
A_ = np.array_split(UpperCamelCase_ , UpperCamelCase_ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
A_ = np.array_split(UpperCamelCase_ , UpperCamelCase_ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(UpperCamelCase_ ):
A_ = chunk_data
else:
A_ = data
def a_ ( UpperCamelCase_ , UpperCamelCase_ ):
if name in group.attrs:
A_ = [n.decode("utf8" ) if hasattr(UpperCamelCase_ , "decode" ) else n for n in group.attrs[name]]
else:
A_ = []
A_ = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("utf8" ) if hasattr(UpperCamelCase_ , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
chunk_id += 1
return data
def a_ ( UpperCamelCase_ ):
def _expand_single_ad_tensor(UpperCamelCase_ ):
if isinstance(UpperCamelCase_ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(UpperCamelCase_ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , UpperCamelCase_ )
| 452
| 0
|
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Any , _snake_case : int , ):
"""simple docstring"""
A__ = parent
A__ = 13
A__ = 7
A__ = True
A__ = True
A__ = False
A__ = True
A__ = 99
A__ = 32
A__ = 2
A__ = 4
A__ = 37
A__ = """gelu"""
A__ = 0.1
A__ = 0.1
A__ = 5_12
A__ = 16
A__ = 2
A__ = 0.02
A__ = 3
A__ = 4
A__ = None
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : str , _snake_case : List[Any] , _snake_case : List[str] , _snake_case : Any , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Dict ):
"""simple docstring"""
A__ = TFDistilBertModel(config=lowercase__ )
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A__ = model(lowercase__ )
A__ = [input_ids, input_mask]
A__ = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[Any] , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = TFDistilBertForMaskedLM(config=lowercase__ )
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A__ = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : str , _snake_case : int , _snake_case : Dict , _snake_case : Any ):
"""simple docstring"""
A__ = TFDistilBertForQuestionAnswering(config=lowercase__ )
A__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
A__ = model(lowercase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : List[Any] , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : int , _snake_case : Optional[int] , _snake_case : str ):
"""simple docstring"""
A__ = self.num_labels
A__ = TFDistilBertForSequenceClassification(lowercase__ )
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A__ = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : int , _snake_case : List[str] , _snake_case : Tuple , _snake_case : int , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : int ):
"""simple docstring"""
A__ = self.num_choices
A__ = TFDistilBertForMultipleChoice(lowercase__ )
A__ = tf.tile(tf.expand_dims(lowercase__ , 1 ) , (1, self.num_choices, 1) )
A__ = tf.tile(tf.expand_dims(lowercase__ , 1 ) , (1, self.num_choices, 1) )
A__ = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
A__ = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self : List[str] , _snake_case : List[Any] , _snake_case : Any , _snake_case : Any , _snake_case : int , _snake_case : List[str] , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = self.num_labels
A__ = TFDistilBertForTokenClassification(lowercase__ )
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A__ = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Any ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
(A__) = config_and_inputs
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
A__ : List[str] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
A__ : Dict = (
{
"""feature-extraction""": TFDistilBertModel,
"""fill-mask""": TFDistilBertForMaskedLM,
"""question-answering""": TFDistilBertForQuestionAnswering,
"""text-classification""": TFDistilBertForSequenceClassification,
"""token-classification""": TFDistilBertForTokenClassification,
"""zero-shot""": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A__ : Tuple = False
A__ : int = False
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = TFDistilBertModelTester(self )
A__ = ConfigTester(self , config_class=lowercase__ , dim=37 )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Any ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase__ )
def _a ( self : str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase__ )
def _a ( self : Any ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase__ )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase__ )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase__ )
@slow
def _a ( self : List[str] ):
"""simple docstring"""
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
A__ = TFDistilBertModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
A__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A__ = model(lowercase__ )[0]
A__ = [1, 6, 7_68]
self.assertEqual(output.shape , lowercase__ )
A__ = tf.constant(
[
[
[0.1926_1885, -0.1373_2955, 0.411_9799],
[0.2215_0156, -0.0742_2661, 0.3903_7204],
[0.2275_6018, -0.089_6414, 0.370_1467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-4 )
| 704
|
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
SCREAMING_SNAKE_CASE__ = f'https://www.google.com/search?q={query}&num=100'
SCREAMING_SNAKE_CASE__ = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
SCREAMING_SNAKE_CASE__ = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
SCREAMING_SNAKE_CASE__ = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 52
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 273
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys( config , base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
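# Minimal standalone sketch of the slicing above (illustrative shapes only):
# timm stores query/key/value as one fused (3*hidden, hidden) projection, and
# the loop splits it into three (hidden, hidden) blocks in q/k/v order.
#
#   import torch
#   hidden = 4
#   fused = torch.randn(3 * hidden, hidden)
#   q, k, v = fused[:hidden], fused[hidden : 2 * hidden], fused[-hidden:]
#   assert torch.equal(torch.cat([q, k, v]), fused)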
def remove_classification_head_( state_dict ):
    """simple docstring"""
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head( state_dict ):
    """simple docstring"""
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    """simple docstring"""
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["""target_encoder"""]
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="""pt""" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1E-4 )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
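# Illustrative invocation (the script filename is assumed here, not given above):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small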
| 577
| 0
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
snake_case_ : str = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class __a (unittest.TestCase ):
    def setUp( self ):
"""simple docstring"""
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
        shutil.copy(
            os.path.join(git_repo_path , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )
    def tearDown( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ):
        """simple docstring"""
        code = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=1_19 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.transformer_dir , '''new_code.py''' )
        with open(fname , '''w''' , newline='''\n''' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , '''r''' ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_transformers( self ):
"""simple docstring"""
        code = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
        self.assertEqual(code , REFERENCE_CODE )
    def test_is_copy_consistent( self ):
"""simple docstring"""
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , F"""{long_class_name}LMPredictionHead""" , re.sub('''Bert''' , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , REFERENCE_CODE , overwrite_result=re.sub('''Bert''' , '''TestModel''' , REFERENCE_CODE ) , )
    def test_convert_to_localized_md( self ):
"""simple docstring"""
        localized_readme = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , localized_md_list , localized_readme['''format_model_list'''] )
        self.assertFalse(num_models_equal )
        self.assertEqual(converted_md_list , converted_localized_md_list )
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , converted_localized_md_list , localized_readme['''format_model_list'''] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal )
        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_unchanged_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list , link_unchanged_md_list , localized_readme['''format_model_list'''] )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list , converted_md_list_sample )
| 721
|
'''simple docstring'''
def twos_complement( number : int ) -> str:
    if number > 0:
        raise ValueError('''input must be a negative integer''' )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length ) )[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else '''0'''
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
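# Worked example for twos_complement above. For -5: bin(-5)[3:] == "101", so
# the magnitude needs 3 bits; abs(-5) - (1 << 3) == -3, whose bin()[3:] is
# "11"; prepending the "1" sign bit gives "0b1011", i.e. -5 in 4-bit two's
# complement.
if __name__ == "__main__":
    for n in (-1, -5, -17):
        print(n, twos_complement(n))  # -> 0b11, 0b1011, 0b101111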
| 644
| 0
|
"""simple docstring"""
from __future__ import annotations
def carrier_concentration( electron_conc , hole_conc , intrinsic_conc , ) -> tuple:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
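# Quick sanity check for carrier_concentration above. With n_i = 1.5e10 cm^-3
# and n = 1e17 cm^-3, mass action (n * p = n_i**2) gives p = 2250.0 cm^-3:
if __name__ == "__main__":
    print(carrier_concentration(1e17, 0, 1.5e10))  # -> ('hole_conc', 2250.0)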
| 163
|
"""simple docstring"""
def sum_of_proper_divisors( input_num ) ->int:
    """simple docstring"""
    if not isinstance(input_num, int ):
        raise ValueError("Input must be an integer" )
    if input_num <= 0:
        raise ValueError("Input must be positive" )
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
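# Aliquot-sum sketch for sum_of_proper_divisors above: a number is perfect
# exactly when the sum of its proper divisors equals the number itself,
# e.g. 6 and 28 but not 12.
if __name__ == "__main__":
    for candidate in (6, 28, 12):
        print(candidate, sum_of_proper_divisors(candidate) == candidate)  # True, True, False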
| 575
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        self.block_size = 10
    def test_fit_to_block_sequence_too_small( self ):
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_fit_to_block_sequence_fit_exactly( self ):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_fit_to_block_sequence_too_big( self ):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_process_story_no_highlights( self ):
        raw_story = """It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."""
        _ , summary_lines = process_story(raw_story )
        self.assertEqual(summary_lines ,[] )
    def test_process_empty_story( self ):
        raw_story = """"""
        story_lines , summary_lines = process_story(raw_story )
        self.assertEqual(story_lines ,[] )
        self.assertEqual(summary_lines ,[] )
    def test_process_story_with_missing_period( self ):
        raw_story = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
        story_lines , summary_lines = process_story(raw_story )
        expected_story_lines = [
            """It was the year of Our Lord one thousand seven hundred and seventy-five.""",
            """Spiritual revelations were conceded to England at that favoured period, as at this.""",
        ]
        self.assertEqual(expected_story_lines ,story_lines )
        expected_summary_lines = ["""It was the best of times."""]
        self.assertEqual(expected_summary_lines ,summary_lines )
    def test_build_mask_no_padding( self ):
        sequence = torch.tensor([1, 2, 3, 4] )
        expected = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(sequence ,0 ).numpy() ,expected.numpy() )
    def test_build_mask( self ):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence ,23 ).numpy() ,expected.numpy() )
    def test_build_mask_with_padding_equal_to_one( self ):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence ,1 ).numpy() ,expected.numpy() )
    def test_compute_token_type_ids( self ):
        separator = 1_01
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        result = compute_token_type_ids(batch ,separator )
        np.testing.assert_array_equal(result ,expected )
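# Reading the last case above: compute_token_type_ids flips the segment id at
# every separator token (101 here), and the separator itself starts the new
# segment, so [1, 101, 3, 4, 101, 6] becomes [1, 0, 0, 0, 1, 1].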
| 701
|
'''simple docstring'''
from __future__ import annotations
import math
def is_prime( number : int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def compute_nums( n : int ) -> list[int]:
    if not isinstance(n , int ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def solution():
    return compute_nums(1 )[0]
if __name__ == "__main__":
    print(F'{solution() = }')
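# The smallest odd composite that cannot be written as prime + 2*i*i is 5777,
# so solution() returns 5777 (the published Project Euler 46 answer).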
| 9
| 0
|
from __future__ import annotations
def encode( plain : str ) -> list[int]:
    return [ord(elem ) - 96 for elem in plain]
def decode( encoded : list[int] ) -> str:
    return "".join(chr(elem + 96 ) for elem in encoded )
def main() -> None:
    encoded = encode(input('''-> ''' ).strip().lower() )
    print('''Encoded: ''' , encoded )
    print('''Decoded:''' , decode(encoded ) )
if __name__ == "__main__":
    main()
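# Round-trip sketch for the functions above (as reconstructed here): letters
# map onto 1..26 and back, so encode("abc") == [1, 2, 3] and
# decode(encode("hello")) == "hello".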
| 175
|
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir( dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / F"{script_name}.py"
    with open(script_path , "w" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_dir )
| 16
| 0
|
from ..utils import DummyObject, requires_backends
class A ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase = ['transformers', 'torch', 'note_seq']
def __init__( self : Tuple,*lowercase_ : Any,**lowercase_ : Dict )-> Union[str, Any]:
'''simple docstring'''
requires_backends(self,['transformers', 'torch', 'note_seq'] )
@classmethod
def snake_case__ ( cls : List[str],*lowercase_ : int,**lowercase_ : Optional[int] )-> Any:
'''simple docstring'''
requires_backends(cls,['transformers', 'torch', 'note_seq'] )
@classmethod
def snake_case__ ( cls : Dict,*lowercase_ : Tuple,**lowercase_ : List[str] )-> Dict:
'''simple docstring'''
requires_backends(cls,['transformers', 'torch', 'note_seq'] )
| 586
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config( model_name ):
    '''simple docstring'''
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('Model not supported' )
    repo_id = 'huggingface/label-files'
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = 'speech-commands-v2-id2label.json'
    else:
        config.num_labels = 527
        filename = 'audioset-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key( name ):
    '''simple docstring'''
    if "module.v" in name:
        name = name.replace('module.v' , 'audio_spectrogram_transformer' )
    if "cls_token" in name:
        name = name.replace('cls_token' , 'embeddings.cls_token' )
    if "dist_token" in name:
        name = name.replace('dist_token' , 'embeddings.distillation_token' )
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'embeddings.position_embeddings' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    # transformer blocks
    if "blocks" in name:
        name = name.replace('blocks' , 'encoder.layer' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
    if "module.mlp_head.1" in name:
        name = name.replace('module.mlp_head.1' , 'classifier.dense' )
    return name
def convert_state_dict( orig_state_dict , config ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[3] )
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias'] = val[:dim]
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def remove_keys( state_dict ):
    '''simple docstring'''
    ignore_keys = [
        'module.v.head.weight',
        'module.v.head.bias',
        'module.v.head_dist.weight',
        'module.v.head_dist.bias',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    config = get_audio_spectrogram_transformer_config(model_name )
    model_name_to_url = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    # remove some keys
    remove_keys(state_dict )
    # rename some keys
    new_state_dict = convert_state_dict(state_dict , config )
    # load 🤗 model
    model = ASTForAudioClassification(config )
    model.eval()
    model.load_state_dict(new_state_dict )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.267_7393 if 'speech-commands' not in model_name else -6.84_5978
    std = 4.568_9974 if 'speech-commands' not in model_name else 5.565_4526
    max_length = 1024 if 'speech-commands' not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
    if "speech-commands" in model_name:
        dataset = load_dataset('speech_commands' , 'v0.02' , split='validation' )
        waveform = dataset[0]['audio']['array']
    else:
        filepath = hf_hub_download(
            repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
        waveform , _ = torchaudio.load(filepath )
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform , sampling_rate=16000 , return_tensors='pt' )
    # forward pass
    outputs = model(**inputs )
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
A__ = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
A__ = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
A__ = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
A__ = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
A__ = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
A__ = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
A__ = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
A__ = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'Saving feature extractor to {pytorch_dump_folder_path}' )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing model and feature extractor to the hub...' )
        model.push_to_hub(f'MIT/{model_name}' )
        feature_extractor.push_to_hub(f'MIT/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 586
| 1
|
"""simple docstring"""
from math import sqrt
def is_prime( number ):
    """simple docstring"""
    assert isinstance(number, int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool ), "'status' must been from type bool"
    return status
def sieve_er( n ):
    """simple docstring"""
    assert isinstance(n, int ) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1 ) )
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list ) ):
        for j in range(i + 1, len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list ), "'ans' must been from type list"
    return ans
def get_prime_numbers( n ):
    """simple docstring"""
    assert isinstance(n, int ) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1 ):
        if is_prime(number ):
            ans.append(number )
    # precondition
    assert isinstance(ans, list ), "'ans' must been from type list"
    return ans
def prime_factorization( number ):
    """simple docstring"""
    assert isinstance(number, int ) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number )
    # precondition
    assert isinstance(ans, list ), "'ans' must been from type list"
    return ans
def greatest_prime_factor( number ):
    """simple docstring"""
    assert isinstance(number, int ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = max(prime_factors )
    # precondition
    assert isinstance(ans, int ), "'ans' must been from type int"
    return ans
def smallest_prime_factor( number ):
    """simple docstring"""
    assert isinstance(number, int ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = min(prime_factors )
    # precondition
    assert isinstance(ans, int ), "'ans' must been from type int"
    return ans
def is_even( number ):
    """simple docstring"""
    assert isinstance(number, int ), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool ), "compare bust been from type bool"
    return number % 2 == 0
def is_odd( number ):
    """simple docstring"""
    assert isinstance(number, int ), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool ), "compare bust been from type bool"
    return number % 2 != 0
def goldbach( number ):
    """simple docstring"""
    assert (
        isinstance(number, int ) and (number > 2) and is_even(number )
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number )
    len_pn = len(prime_numbers )
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list )
        and (len(ans ) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd( number_a, number_b ):
    """simple docstring"""
    assert (
        isinstance(number_a, int )
        and isinstance(number_b, int )
        and (number_a >= 0)
        and (number_b >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number_b != 0:
        rest = number_a % number_b
        number_a = number_b
        number_b = rest
    # precondition
    assert isinstance(number_a, int ) and (
        number_a >= 0
    ), "'number' must been from type int and positive"
    return number_a
def kg_v( number_a, number_b ):
    """simple docstring"""
    assert (
        isinstance(number_a, int )
        and isinstance(number_b, int )
        and (number_a >= 1)
        and (number_b >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number_a > 1 and number_b > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(number_a )
        prime_fac_b = prime_factorization(number_b )
    elif number_a == 1 or number_b == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(number_a, number_b )
    count_a = 0
    count_b = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n )
                count_b = prime_fac_b.count(n )
                for _ in range(max(count_a, count_b ) ):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n )
                for _ in range(count_a ):
                    ans *= n
            done.append(n )
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n )
            for _ in range(count_b ):
                ans *= n
            done.append(n )
    # precondition
    assert isinstance(ans, int ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime( n ):
    """simple docstring"""
    assert isinstance(n, int ) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans ):
            ans += 1
    # precondition
    assert isinstance(ans, int ) and is_prime(
        ans ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between( p_number_a, p_number_b ):
    """simple docstring"""
    assert (
        is_prime(p_number_a ) and is_prime(p_number_b ) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number ):
        number += 1
    while number < p_number_b:
        ans.append(number )
        number += 1
        # fetch the next prime number.
        while not is_prime(number ):
            number += 1
    # precondition
    assert (
        isinstance(ans, list )
        and ans[0] != p_number_a
        and ans[len(ans ) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors( n ):
    """simple docstring"""
    assert isinstance(n, int ) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1 ):
        if n % divisor == 0:
            ans.append(divisor )
    # precondition
    assert ans[0] == 1 and ans[len(ans ) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number( number ):
    """simple docstring"""
    assert isinstance(number, int ) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number )
    # precondition
    assert (
        isinstance(divisors, list )
        and (divisors[0] == 1)
        and (divisors[len(divisors ) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1] ) == number
def simplify_fraction( numerator, denominator ):
    """simple docstring"""
    assert (
        isinstance(numerator, int )
        and isinstance(denominator, int )
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator ), abs(denominator ) )
    # precondition
    assert (
        isinstance(gcd_of_fraction, int )
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial( n ):
    """simple docstring"""
    assert isinstance(n, int ) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1 ):
        ans *= factor
    return ans
def fib( n ):
    """simple docstring"""
    assert isinstance(n, int ) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fiba = 1
    ans = 1  # this will be return
    for _ in range(n - 1 ):
        tmp = ans
        ans += fiba
        fiba = tmp
    return ans
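# Smoke-test sketch for the helpers reconstructed above (illustrative values):
if __name__ == "__main__":
    assert gcd(12, 18) == 6
    assert kg_v(8, 10) == 40
    assert simplify_fraction(10, 20) == (1, 2)
    assert get_primes_between(3, 23) == [5, 7, 11, 13, 17, 19]
    assert factorial(5) == 120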
| 584
|
from __future__ import annotations
def pigeon_sort( array ):
    if len(array ) == 0:
        return array
    _min , _max = min(array ), max(array )
    # Compute the variables
    holes_range = _max - _min + 1
    holes , holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range ):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input("""Enter numbers separated by comma:\n""")
    unsorted = [int(x) for x in user_input.split(""",""")]
    print(pigeon_sort(unsorted))
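# Example: pigeon_sort([8, 3, 2, 7, 4, 6, 8]) returns [2, 3, 4, 6, 7, 8, 8].
# Pigeonhole sort runs in O(n + range) time and memory, so it only pays off
# when max(array) - min(array) is small relative to the number of elements.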
| 559
| 0
|
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
MODELS_MAP = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 48_000,
"sample_size": 65_536,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 48_000,
"sample_size": 65_536,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 48_000,
"sample_size": 131_072,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 16_000,
"sample_size": 65_536,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 16_000,
"sample_size": 65_536,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 16_000,
"sample_size": 65_536,
},
}
def alpha_sigma_to_t( alpha , sigma ):
    return torch.atan2(sigma , alpha ) / math.pi * 2
def get_crash_schedule( t ):
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
class Object( object ):
    '''simple docstring'''
    pass
class DiffusionUncond( nn.Module ):
    '''simple docstring'''
    def __init__( self , global_args ):
        super().__init__()
        self.diffusion = DiffusionAttnUnetaD(global_args , n_attn_layers=4 )
        self.diffusion_ema = deepcopy(self.diffusion )
        self.rng = torch.quasirandom.SobolEngine(1 , scramble=True )
def download( model_name ):
    url = MODELS_MAP[model_name]['''url''']
    os.system(F"""wget {url} ./""" )
    return F"""./{model_name}.ckpt"""
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
UP_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
MID_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming( name ):
    if name.startswith('''skip''' ):
        return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
    # name has to be of format main.{digit}
    if not name.startswith('''main.''' ):
        raise ValueError(F"""ResConvBlock error with {name}""" )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming( name ):
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(F"""Attn error with {name}""" )
def rename( input_string , max_depth=13 ):
    string = input_string
    if string.split('''.''' )[0] == "timestep_embed":
        return string.replace('''timestep_embed''' , '''time_proj''' )
    depth = 0
    if string.startswith('''net.3.''' ):
        depth += 1
        string = string[6:]
    elif string.startswith('''net.''' ):
        string = string[4:]
    while string.startswith('''main.7.''' ):
        depth += 1
        string = string[7:]
    if string.startswith('''main.''' ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = '''mid_block'''
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = F"""down_blocks.{depth}"""
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = F"""up_blocks.{max_depth - depth - 1}"""
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = F"""up_blocks.{max_depth - 1}""" if int(layer_num ) > 3 else '''down_blocks.0'''
    if not string_left.startswith('''.''' ):
        raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left )
    new_string_left = string_left
    if not isinstance(new_string_left , list ):
        new_string = prefix + '''.''' + new_layer + '''.''' + new_string_left
    else:
        new_string = [prefix + '''.''' + new_layer + '''.''' + s for s in new_string_left]
    return new_string
def rename_orig_weights( state_dict ):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('''kernel''' ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns( new_state_dict , new_k , v ):
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main( args ):
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    model_name = args.model_path.split('''/''' )[-1].split('''.''' )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
        args.model_path = download(model_name )
    sample_rate = MODELS_MAP[model_name]['''sample_rate''']
    sample_size = MODELS_MAP[model_name]['''sample_size''']
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNetaDModel(sample_size=sample_size , sample_rate=sample_rate )
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=device )['''state_dict'''] )
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict )
    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(renamed_minus_diffusers ) == 0, F"""Problem with {renamed_minus_diffusers}"""
    assert all(k.endswith('''kernel''' ) for k in list(diffusers_minus_renamed ) ), F"""Problem with {diffusers_minus_renamed}"""
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict )
    steps = 100
    seed = 33
    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps )
    generator = torch.manual_seed(seed )
    noise = torch.randn([1, 2, config.sample_size] , generator=generator ).to(device )
    t = torch.linspace(1 , 0 , steps + 1 , device=device )[:-1]
    step_list = get_crash_schedule(t )
    pipe = DanceDiffusionPipeline(unet=diffusers_model , scheduler=diffusers_scheduler )
    generator = torch.manual_seed(33 )
    audio = pipe(num_inference_steps=steps , generator=generator ).audios
    generated = sampling.iplms_sample(orig_model , noise , step_list , {} )
    generated = generated.clamp(-1 , 1 )
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print('''Diff sum''' , diff_sum )
    print('''Diff max''' , diff_max )
    assert diff_max < 1E-3, F"""Diff max: {diff_max} is too much :-/"""
    print(F"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__UpperCAmelCase = parser.parse_args()
main(args)
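# Hypothetical invocation (script name, model name and output path are examples only):
#   python convert_dance_diffusion_to_diffusers.py --model_path maestro-150k --checkpoint_path ./maestro-diffusers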
| 718
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__UpperCAmelCase = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 98
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
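# The `_LazyModule` registered in `sys.modules` defers the heavy torch imports
# above until an attribute such as `CanineModel` is first accessed.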
| 50
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
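# Illustrative instantiation (hyperparameters are made-up examples, not a released checkpoint):
#   config = GPTBigCodeConfig(n_embd=512, n_layer=6, n_head=8, multi_query=True)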
| 50
| 1
|
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # rescale every value into the closed interval [0, 1]
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # shift to zero mean and scale to unit (sample) standard deviation
    return [round((x - mu) / sigma, ndigits) for x in data]
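# Hand-checked sanity examples for a tiny sample:
#   normalization([2, 4, 6])   -> [0.0, 0.5, 1.0]
#   standardization([2, 4, 6]) -> [-1.0, 0.0, 1.0]   (mean 4, sample stdev 2)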
| 702
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
| 212
| 0
|
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
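# Hypothetical invocation (script name and paths are placeholders):
#   python convert_bigbird_tf_to_pytorch.py --tf_checkpoint_path ./bigbird.ckpt \
#       --big_bird_config_file ./config.json --pytorch_dump_path ./pytorch_model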
| 358
|
'''simple docstring'''
def solution() -> int:
    """Compute d_1 * d_10 * d_100 * d_1000 * d_10000 * d_100000 * d_1000000,
    where d_n is the n-th digit of Champernowne's constant."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
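# The indices 0, 9, 99, ... pick the 1st, 10th, 100th, ... digits (0-based) of
# Champernowne's constant 0.123456789101112... (Project Euler problem 40).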
| 358
| 1
|
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=0.2 , __UpperCamelCase=0.2 )-> str:
'''simple docstring'''
UpperCAmelCase__ : List[str] = bp_numa
UpperCAmelCase__ : str = bp_numa
UpperCAmelCase__ : List[Any] = bp_numa
UpperCAmelCase__ : Union[str, Any] = conva_get[:2]
UpperCAmelCase__ : Dict = conva_get[2]
UpperCAmelCase__ : List[Any] = size_pa
UpperCAmelCase__ : Optional[int] = rate_w
UpperCAmelCase__ : Any = rate_t
UpperCAmelCase__ : List[str] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCAmelCase__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
UpperCAmelCase__ : Optional[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
UpperCAmelCase__ : Dict = -2 * np.random.rand(self.conva[1] ) + 1
UpperCAmelCase__ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
UpperCAmelCase__ : Optional[Any] = -2 * np.random.rand(self.num_bpa ) + 1
    def save_model(self, save_path):
        # collect every learned parameter so the model can be restored later
        model_dic = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__UpperCamelCase , "wb" ) as f:
pickle.dump(__UpperCamelCase , __UpperCamelCase )
print(F"Model saved: {save_path}" )
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase )-> Dict:
'''simple docstring'''
with open(__UpperCamelCase , "rb" ) as f:
UpperCAmelCase__ : Union[str, Any] = pickle.load(__UpperCamelCase ) # noqa: S301
UpperCAmelCase__ : Optional[Any] = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
UpperCAmelCase__ : str = model_dic.get("size_pooling1" )
UpperCAmelCase__ : List[str] = model_dic.get("num_bp1" )
UpperCAmelCase__ : Union[str, Any] = model_dic.get("num_bp2" )
UpperCAmelCase__ : Dict = model_dic.get("num_bp3" )
UpperCAmelCase__ : int = model_dic.get("rate_weight" )
UpperCAmelCase__ : str = model_dic.get("rate_thre" )
# create model instance
UpperCAmelCase__ : int = CNN(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# modify model parameter
UpperCAmelCase__ : List[Any] = model_dic.get("w_conv1" )
UpperCAmelCase__ : int = model_dic.get("wkj" )
UpperCAmelCase__ : Any = model_dic.get("vji" )
UpperCAmelCase__ : int = model_dic.get("thre_conv1" )
UpperCAmelCase__ : Optional[int] = model_dic.get("thre_bp2" )
UpperCAmelCase__ : Dict = model_dic.get("thre_bp3" )
return conv_ins
    def sig(self, x):
        # logistic sigmoid: maps any real input into (0, 1)
        return 1 / (1 + np.exp(-1 * x))
    def do_round(self, x):
        return round(x, 3)
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Tuple:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = convs[0]
UpperCAmelCase__ : Union[str, Any] = convs[1]
UpperCAmelCase__ : Tuple = np.shape(__UpperCamelCase )[0]
# get the data slice of original image data, data_focus
UpperCAmelCase__ : Optional[int] = []
for i_focus in range(0 , size_data - size_conv + 1 , __UpperCamelCase ):
for j_focus in range(0 , size_data - size_conv + 1 , __UpperCamelCase ):
UpperCAmelCase__ : int = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__UpperCamelCase )
        # calculate the feature map of every kernel and save each one as a matrix in a list
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : Union[str, Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__UpperCamelCase ):
UpperCAmelCase__ : str = []
for i_focus in range(len(__UpperCamelCase ) ):
UpperCAmelCase__ : Dict = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__UpperCamelCase ) )
UpperCAmelCase__ : int = np.asmatrix(__UpperCamelCase ).reshape(
__UpperCamelCase , __UpperCamelCase )
data_featuremap.append(__UpperCamelCase )
        # flatten each data slice into one dimension
UpperCAmelCase__ : Any = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) )
UpperCAmelCase__ : List[str] = np.asarray(__UpperCamelCase )
return focus_list, data_featuremap
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="average_pool" )-> Tuple:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = len(featuremaps[0] )
UpperCAmelCase__ : Dict = int(size_map / size_pooling )
UpperCAmelCase__ : Dict = []
for i_map in range(len(__UpperCamelCase ) ):
UpperCAmelCase__ : Optional[Any] = featuremaps[i_map]
UpperCAmelCase__ : List[str] = []
for i_focus in range(0 , __UpperCamelCase , __UpperCamelCase ):
for j_focus in range(0 , __UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : List[str] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__UpperCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__UpperCamelCase ) )
UpperCAmelCase__ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase , __UpperCamelCase )
featuremap_pooled.append(__UpperCamelCase )
return featuremap_pooled
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
'''simple docstring'''
UpperCAmelCase__ : List[str] = []
for i in range(len(__UpperCamelCase ) ):
UpperCAmelCase__ : Optional[Any] = np.shape(data[i] )
UpperCAmelCase__ : Tuple = data[i].reshape(1 , shapes[0] * shapes[1] )
UpperCAmelCase__ : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = np.asarray(__UpperCamelCase )
return data_expanded
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
'''simple docstring'''
UpperCAmelCase__ : Tuple = np.asarray(__UpperCamelCase )
UpperCAmelCase__ : Any = np.shape(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Tuple = 0
for i_map in range(__UpperCamelCase ):
UpperCAmelCase__ : str = np.ones((size_map, size_map) )
for i in range(0 , __UpperCamelCase , __UpperCamelCase ):
for j in range(0 , __UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : List[Any] = pd_pool[
i_pool
]
UpperCAmelCase__ : str = i_pool + 1
UpperCAmelCase__ : Optional[Any] = np.multiply(
__UpperCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(__UpperCamelCase )
return pd_all
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=bool )-> Optional[int]:
'''simple docstring'''
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(__UpperCamelCase )) )
print((" - - Shape: Teach_Data ", np.shape(__UpperCamelCase )) )
UpperCAmelCase__ : Union[str, Any] = 0
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : Any = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase__ : str = 0
print(F"-------------Learning Time {rp}--------------" )
for p in range(len(__UpperCamelCase ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase__ : Union[str, Any] = np.asmatrix(datas_train[p] )
UpperCAmelCase__ : int = np.asarray(datas_teach[p] )
UpperCAmelCase__ : Any = self.convolute(
__UpperCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCAmelCase__ : Dict = self.pooling(__UpperCamelCase , self.size_poolinga )
UpperCAmelCase__ : List[str] = np.shape(__UpperCamelCase )
UpperCAmelCase__ : str = self._expand(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = data_bp_input
UpperCAmelCase__ : str = np.dot(__UpperCamelCase , self.vji.T ) - self.thre_bpa
UpperCAmelCase__ : Dict = self.sig(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = np.dot(__UpperCamelCase , self.wkj.T ) - self.thre_bpa
UpperCAmelCase__ : Optional[int] = self.sig(__UpperCamelCase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
UpperCAmelCase__ : List[str] = np.multiply(
(data_teach - bp_outa) , np.multiply(__UpperCamelCase , (1 - bp_outa) ) )
UpperCAmelCase__ : Tuple = np.multiply(
np.dot(__UpperCamelCase , self.wkj ) , np.multiply(__UpperCamelCase , (1 - bp_outa) ) )
UpperCAmelCase__ : int = np.dot(__UpperCamelCase , self.vji )
UpperCAmelCase__ : str = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase__ : List[Any] = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase__ : str = self._calculate_gradient_from_pool(
__UpperCamelCase , __UpperCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase__ : Any = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase__ : Optional[int] = self.rate_weight * np.dot(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Tuple = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase__ : int = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
UpperCAmelCase__ : str = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase__ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase__ : Optional[int] = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase__ : Any = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
UpperCAmelCase__ : Optional[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase__ : int = rp + 1
UpperCAmelCase__ : Union[str, Any] = error_count / patterns
all_mse.append(__UpperCamelCase )
def draw_error():
UpperCAmelCase__ : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__UpperCamelCase , "+-" )
plt.plot(__UpperCamelCase , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(__UpperCamelCase , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}") )
if draw_e:
draw_error()
return mse
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
'''simple docstring'''
UpperCAmelCase__ : List[str] = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(__UpperCamelCase )) )
for p in range(len(__UpperCamelCase ) ):
UpperCAmelCase__ : Tuple = np.asmatrix(datas_test[p] )
UpperCAmelCase__ : Optional[Any] = self.convolute(
__UpperCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCAmelCase__ : Any = self.pooling(__UpperCamelCase , self.size_poolinga )
UpperCAmelCase__ : Tuple = self._expand(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = data_bp_input
UpperCAmelCase__ : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase__ : Any = self.sig(__UpperCamelCase )
UpperCAmelCase__ : List[str] = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase__ : Any = self.sig(__UpperCamelCase )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase__ : Tuple = [list(map(self.do_round , __UpperCamelCase ) ) for each in produce_out]
return np.asarray(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
'''simple docstring'''
UpperCAmelCase__ : Any = np.asmatrix(__UpperCamelCase )
UpperCAmelCase__ : str = self.convolute(
__UpperCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCAmelCase__ : Dict = self.pooling(__UpperCamelCase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 711
|
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
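# scale() whitens embeddings with the stored statistics and unscale() inverts
# it, so unscale(scale(x)) recovers x up to floating-point error.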
| 660
| 0
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
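# Hypothetical CLI usage (the model name is just an example):
#   transformers-cli download bert-base-uncased --cache-dir ./models --force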
| 454
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 699
| 0
|
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Depth-first search over a graph given as an adjacency dict.

    >>> sorted(depth_first_search(G, "A"))
    ['A', 'B', 'C', 'D', 'E', 'F', 'G']
    """
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 297
|
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout=0.1,
        activation_function="gelu",
        vocab_size=30522,
        hidden_size=1024,
        encoder_ffn_dim=4096,
        num_encoder_layers=12,
        num_encoder_attention_heads=16,
        decoder_ffn_dim=4096,
        num_decoder_layers=12,
        num_decoder_attention_heads=16,
        attention_dropout=0.1,
        dropout=0.1,
        max_position_embeddings=512,
        init_std=0.02,
        is_encoder_decoder=True,
        add_cross_attention=True,
        decoder_start_token_id=0,
        ngram=2,
        num_buckets=32,
        relative_max_distance=128,
        disable_ngram_loss=False,
        eps=0.0,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
| 297
| 1
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
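# Example: a uniform distribution over two outcomes has entropy ln(2):
#   entropy(torch.tensor([0.5, 0.5]))  # -> tensor(0.6931)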
def print_ad_tensor(tensor):
    """Print a 2D tensor, one row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) until the score drops below a threshold,
    following the head importance scores of Michel et al. (http://arxiv.org/abs/1905.10650).
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (remove the masked head weights entirely) and measure the speedup,
    following Michel et al. (http://arxiv.org/abs/1905.10650).
    """
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False,
        head_mask=None, actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=lowerCAmelCase_ , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=lowerCAmelCase_ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=lowerCAmelCase_ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=lowerCAmelCase_ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=lowerCAmelCase_ , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=lowerCAmelCase_ , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=lowerCAmelCase_ , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=lowerCAmelCase_ , help="Batch size." )
parser.add_argument("--seed" , type=lowerCAmelCase_ , default=42 )
parser.add_argument("--local_rank" , type=lowerCAmelCase_ , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=lowerCAmelCase_ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=lowerCAmelCase_ , default="" , help="Can be used for distant debugging." )
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 682
|
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
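# Padding to a multiple of 8 (fp16) or 16 (fp8) keeps tensor shapes friendly to
# tensor cores; on TPU a fixed max_length avoids repeated XLA recompilation.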
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
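# Typical launch (the script name is a placeholder for this example file):
#   accelerate launch memory_example.py --mixed_precision fp16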
| 682
| 1
|
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 347
|
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class a_ ( snake_case ):
UpperCAmelCase : Dict = """time_series_transformer"""
UpperCAmelCase : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : Any , a_ : Optional[int] = None , a_ : Optional[int] = None , a_ : str = "student_t" , a_ : str = "nll" , a_ : int = 1 , a_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , a_ : Optional[Union[str, bool]] = "mean" , a_ : int = 0 , a_ : int = 0 , a_ : int = 0 , a_ : int = 0 , a_ : Optional[List[int]] = None , a_ : Optional[List[int]] = None , a_ : int = 3_2 , a_ : int = 3_2 , a_ : int = 2 , a_ : int = 2 , a_ : int = 2 , a_ : int = 2 , a_ : bool = True , a_ : str = "gelu" , a_ : int = 6_4 , a_ : float = 0.1 , a_ : float = 0.1 , a_ : float = 0.1 , a_ : float = 0.1 , a_ : float = 0.1 , a_ : int = 1_0_0 , a_ : float = 0.0_2 , a_ : Optional[int]=True , **a_ : Tuple , ) -> Optional[int]:
# time series specific configuration
snake_case: Dict =prediction_length
snake_case: Any =context_length or prediction_length
snake_case: str =distribution_output
snake_case: List[str] =loss
snake_case: Optional[Any] =input_size
snake_case: Optional[Any] =num_time_features
snake_case: List[str] =lags_sequence
snake_case: Union[str, Any] =scaling
snake_case: List[str] =num_dynamic_real_features
snake_case: Any =num_static_real_features
snake_case: Dict =num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(a_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
snake_case: Dict =cardinality
else:
snake_case: Dict =[0]
if embedding_dimension and num_static_categorical_features > 0:
if len(a_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
snake_case: List[Any] =embedding_dimension
else:
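            # common rule of thumb: embedding size of roughly half the category cardinality, capped at 50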
snake_case: str =[min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case: Any =num_parallel_samples
# Transformer architecture configuration
snake_case: Union[str, Any] =input_size * len(a_ ) + self._number_of_features
snake_case: List[Any] =d_model
snake_case: int =encoder_attention_heads
snake_case: Optional[int] =decoder_attention_heads
snake_case: str =encoder_ffn_dim
snake_case: List[Any] =decoder_ffn_dim
snake_case: str =encoder_layers
snake_case: List[str] =decoder_layers
snake_case: List[Any] =dropout
snake_case: Union[str, Any] =attention_dropout
snake_case: Optional[int] =activation_dropout
snake_case: str =encoder_layerdrop
snake_case: Optional[int] =decoder_layerdrop
snake_case: Tuple =activation_function
snake_case: List[Any] =init_std
snake_case: Union[str, Any] =use_cache
super().__init__(is_encoder_decoder=a_ , **a_ )
@property
def UpperCamelCase ( self : Tuple ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
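# Illustrative arithmetic (assumed values, not from this file): with input_size=1,
# the default 7 lags, embedding dims summing to 2, one time feature and no extra
# real features, the encoder is fed 1 * 7 + (2 + 0 + 1 + 0 + 1 * 2) = 12 feature
# columns per time step before the d_model projection.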
| 347
| 1
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
lowerCAmelCase: Union[str, Any] =logging.get_logger(__name__)
@add_end_docstrings(__UpperCamelCase )
class lowerCamelCase__ ( __UpperCamelCase ):
def __init__( self , *snake_case , **snake_case ) -> Any:
"""simple docstring"""
super().__init__(*snake_case__ , **snake_case__ )
requires_backends(self , """vision""" )
self.check_model_type(snake_case__ )
def __call__( self , snake_case , **snake_case ) -> Optional[int]:
"""simple docstring"""
return super().__call__(snake_case__ , **snake_case__ )
def _UpperCAmelCase ( self , **snake_case ) -> Dict:
"""simple docstring"""
return {}, {}, {}
def _UpperCAmelCase ( self , snake_case ) -> Tuple:
"""simple docstring"""
lowercase : Dict = load_image(snake_case__ )
lowercase : Optional[Any] = image.size
lowercase : List[str] = self.image_processor(images=snake_case__ , return_tensors=self.framework )
return model_inputs
def _UpperCAmelCase ( self , snake_case ) -> List[str]:
"""simple docstring"""
lowercase : Any = self.model(**snake_case__ )
return model_outputs
def _UpperCAmelCase ( self , snake_case ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Optional[int] = model_outputs.predicted_depth
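        # upsample the low-resolution depth map to the input resolution; PIL sizes are (width, height), hence the reversed tuple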
lowercase : List[str] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=snake_case__ )
lowercase : Optional[int] = prediction.squeeze().cpu().numpy()
lowercase : Dict = (output * 2_5_5 / np.max(snake_case__ )).astype("""uint8""" )
lowercase : Any = Image.fromarray(snake_case__ )
lowercase : List[Any] = {}
lowercase : Union[str, Any] = predicted_depth
lowercase : List[Any] = depth
return output_dict
| 607
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = args.pruning_method
SCREAMING_SNAKE_CASE = args.threshold
SCREAMING_SNAKE_CASE = args.model_name_or_path.rstrip('/' )
SCREAMING_SNAKE_CASE = args.target_model_path
print(f"""Load fine-pruned model from {model_name_or_path}""" )
SCREAMING_SNAKE_CASE = torch.load(os.path.join(_UpperCamelCase , 'pytorch_model.bin' ) )
SCREAMING_SNAKE_CASE = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
SCREAMING_SNAKE_CASE = tensor
print(f"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
SCREAMING_SNAKE_CASE = tensor
print(f"""Copied layer {name}""" )
elif "bias" in name:
SCREAMING_SNAKE_CASE = tensor
print(f"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
SCREAMING_SNAKE_CASE = MagnitudeBinarizer.apply(inputs=_UpperCamelCase , threshold=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
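                # drop the trailing "weight" (6 characters) so the matching "<prefix>mask_scores" tensor can be looked up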
SCREAMING_SNAKE_CASE = name[:-6]
SCREAMING_SNAKE_CASE = model[f"""{prefix_}mask_scores"""]
SCREAMING_SNAKE_CASE = TopKBinarizer.apply(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
SCREAMING_SNAKE_CASE = name[:-6]
SCREAMING_SNAKE_CASE = model[f"""{prefix_}mask_scores"""]
SCREAMING_SNAKE_CASE = ThresholdBinarizer.apply(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
SCREAMING_SNAKE_CASE = name[:-6]
SCREAMING_SNAKE_CASE = model[f"""{prefix_}mask_scores"""]
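                # L0 / hard-concrete relaxation: stretch sigmoid(scores) from (0, 1) onto (l, r) = (-0.1, 1.1), then clamp back to [0, 1] to get the mask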
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -0.1, 1.1
SCREAMING_SNAKE_CASE = torch.sigmoid(_UpperCamelCase )
SCREAMING_SNAKE_CASE = s * (r - l) + l
SCREAMING_SNAKE_CASE = s_bar.clamp(min=0.0 , max=1.0 )
SCREAMING_SNAKE_CASE = tensor * mask
print(f"""Pruned layer {name}""" )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
SCREAMING_SNAKE_CASE = os.path.join(
os.path.dirname(_UpperCamelCase ) , f"""bertarized_{os.path.basename(_UpperCamelCase )}""" )
if not os.path.isdir(_UpperCamelCase ):
shutil.copytree(_UpperCamelCase , _UpperCamelCase )
print(f"""\nCreated folder {target_model_path}""" )
torch.save(_UpperCamelCase , os.path.join(_UpperCamelCase , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
a_ : Dict = parser.parse_args()
main(args)
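# Example invocation (hypothetical paths and values):
#   python bertarize.py --pruning_method sigmoied_threshold --threshold 0.1 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model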
| 439
| 0
|
from __future__ import annotations
def lowerCAmelCase (__UpperCamelCase : list , __UpperCamelCase : int | None = None , __UpperCamelCase : int | None = None ):
"""simple docstring"""
if start is None:
__UpperCamelCase =0
if end is None:
__UpperCamelCase =len(__UpperCamelCase ) - 1
if start >= end:
return
__UpperCamelCase =(start + end) // 2
slowsort(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
slowsort(__UpperCamelCase , mid + 1 , __UpperCamelCase )
if sequence[end] < sequence[mid]:
__UpperCamelCase , __UpperCamelCase =sequence[mid], sequence[end]
slowsort(__UpperCamelCase , __UpperCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 707
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = ['''image_processor''', '''tokenizer''']
lowercase__ = '''ChineseCLIPImageProcessor'''
lowercase__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : int , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : int ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCamelCase__ , )
__UpperCamelCase =kwargs.pop('''feature_extractor''' )
__UpperCamelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =self.image_processor
def __call__( self : List[str] , UpperCamelCase__ : str=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : int=None , **UpperCamelCase__ : Dict ) -> Dict:
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__UpperCamelCase =self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if images is not None:
__UpperCamelCase =self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and images is not None:
__UpperCamelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def UpperCAmelCase_ ( self : Any , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase_ ( self : int , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[str] ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def UpperCAmelCase_ ( self : Any ) -> str:
'''simple docstring'''
__UpperCamelCase =self.tokenizer.model_input_names
__UpperCamelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase_ ( self : Any ) -> int:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCamelCase__ , )
return self.image_processor_class
| 296
| 0
|
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : int , lowerCamelCase : Any , lowerCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(unet=lowerCamelCase , scheduler=lowerCamelCase )
@torch.no_grad()
def __call__( self : Dict , lowerCamelCase : int = 1 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : int = 50 , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , **lowerCamelCase : List[Any] , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
_UpperCAmelCase = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=lowerCamelCase , )
_UpperCAmelCase = image.to(self.device )
# set step values
self.scheduler.set_timesteps(lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_UpperCAmelCase = self.unet(lowerCamelCase , lowerCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
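        # map samples from [-1, 1] back to [0, 1], then to channel-last numpy arrays for PIL conversion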
_UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=lowerCamelCase ), "This is a local test"
| 108
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 108
| 1
|
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowercase ( lowerCAmelCase , unittest.TestCase ):
_a : Optional[int] = ProphetNetTokenizer
_a : Dict = False
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
super().setUp()
__snake_case : Any =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__snake_case : Tuple =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _UpperCamelCase ( self : Tuple , a : List[str] ):
"""simple docstring"""
__snake_case : Any ='''UNwant\u00E9d,running'''
__snake_case : Optional[int] ='''unwanted, running'''
return input_text, output_text
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
__snake_case : Optional[Any] =self.tokenizer_class(self.vocab_file )
__snake_case : Dict =tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
__snake_case : Union[str, Any] =BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case : Optional[int] =BasicTokenizer(do_lower_case=a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
__snake_case : str =BasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
__snake_case : str =BasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
__snake_case : Any =BasicTokenizer(do_lower_case=a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _UpperCamelCase ( self : str ):
"""simple docstring"""
__snake_case : List[Any] =BasicTokenizer(do_lower_case=a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
__snake_case : Union[str, Any] =BasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
__snake_case : str =BasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case : Tuple =BasicTokenizer(do_lower_case=a , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
__snake_case : Optional[Any] =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__snake_case : Tuple ={}
for i, token in enumerate(a ):
__snake_case : Optional[int] =i
__snake_case : str =WordpieceTokenizer(vocab=a , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
@require_torch
def _UpperCamelCase ( self : str ):
"""simple docstring"""
__snake_case : Optional[Any] =self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
__snake_case : int =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__snake_case : Dict =[1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
__snake_case : List[Any] =tokenizer(a , padding=a , return_tensors='''pt''' )
self.assertIsInstance(a , a )
__snake_case : Optional[Any] =list(batch.input_ids.numpy()[0] )
self.assertListEqual(a , a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def _UpperCamelCase ( self : str ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def _UpperCamelCase ( self : int ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def _UpperCamelCase ( self : int ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
def _UpperCamelCase ( self : int ):
"""simple docstring"""
__snake_case : Union[str, Any] =self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
__snake_case : Any =tokenizer.encode('''sequence builders''' , add_special_tokens=a )
__snake_case : Union[str, Any] =tokenizer.encode('''multi-sequence build''' , add_special_tokens=a )
__snake_case : List[str] =tokenizer.build_inputs_with_special_tokens(a )
__snake_case : List[Any] =tokenizer.build_inputs_with_special_tokens(a , a )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 497
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase_ : Union[str, Any] = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class _lowercase ( lowerCAmelCase ):
_a : str = '''ctrl'''
_a : Optional[int] = ['''past_key_values''']
_a : List[Any] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : str , a : Optional[int]=2_4_6_5_3_4 , a : Optional[int]=2_5_6 , a : Optional[int]=1_2_8_0 , a : Any=8_1_9_2 , a : int=4_8 , a : Optional[Any]=1_6 , a : str=0.1 , a : str=0.1 , a : Any=1e-6 , a : Optional[int]=0.0_2 , a : int=True , **a : str , ):
"""simple docstring"""
__snake_case : Tuple =vocab_size
__snake_case : Optional[Any] =n_positions
__snake_case : List[Any] =n_embd
__snake_case : Any =n_layer
__snake_case : Any =n_head
__snake_case : str =dff
__snake_case : List[str] =resid_pdrop
__snake_case : str =embd_pdrop
__snake_case : Union[str, Any] =layer_norm_epsilon
__snake_case : List[Any] =initializer_range
__snake_case : Any =use_cache
super().__init__(**a )
| 497
| 1
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a = logging.get_logger(__name__)
a = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
a = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
a = {'''facebook/blenderbot-3B''': 128}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : str = VOCAB_FILES_NAMES
UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : List[str] = ['''input_ids''', '''attention_mask''']
UpperCAmelCase : Any = BlenderbotTokenizer
def __init__( self : Optional[Any] , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Optional[int]="replace" , _UpperCAmelCase : int="<s>" , _UpperCAmelCase : str="</s>" , _UpperCAmelCase : List[Any]="</s>" , _UpperCAmelCase : int="<s>" , _UpperCAmelCase : int="<unk>" , _UpperCAmelCase : List[str]="<pad>" , _UpperCAmelCase : Optional[int]="<mask>" , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : str=True , **_UpperCAmelCase : str , ):
super().__init__(
_UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase , **_UpperCAmelCase , )
_A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _UpperCAmelCase ) != add_prefix_space:
_A = getattr(_UpperCAmelCase , pre_tok_state.pop('type' ) )
_A = add_prefix_space
_A = pre_tok_class(**_UpperCAmelCase )
_A = add_prefix_space
_A = 'post_processor'
_A = getattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
if tokenizer_component_instance:
_A = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_A = tuple(state['sep'] )
if "cls" in state:
_A = tuple(state['cls'] )
_A = False
if state.get('add_prefix_space' , _UpperCAmelCase ) != add_prefix_space:
_A = add_prefix_space
_A = True
if state.get('trim_offsets' , _UpperCAmelCase ) != trim_offsets:
_A = trim_offsets
_A = True
if changes_to_apply:
_A = getattr(_UpperCAmelCase , state.pop('type' ) )
_A = component_class(**_UpperCAmelCase )
setattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCAmelCase_ ( self : Optional[Any] ):
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Union[str, Any] ):
_A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else value
_A = value
def lowerCAmelCase_ ( self : int , *_UpperCAmelCase : int , **_UpperCAmelCase : Optional[int] ):
_A = kwargs.get('is_split_into_words' , _UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_UpperCAmelCase , **_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[Any] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Union[str, Any] ):
_A = kwargs.get('is_split_into_words' , _UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_UpperCAmelCase , **_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
_A = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : "Conversation" ):
_A = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(_UpperCAmelCase )
_A = ' '.join(_UpperCAmelCase )
_A = self.encode(_UpperCAmelCase )
if len(_UpperCAmelCase ) > self.model_max_length:
_A = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
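        # Sketch of intended use (names follow the upstream transformers API):
        #   tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
        #   tok._build_conversation_input_ids(conversation)  # user turns get a leading space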
| 7
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 474
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowerCAmelCase__ ( a__ ):
A = 42
class lowerCAmelCase__ ( a__ ,a__ ):
@register_to_config
def __init__( self : int , UpperCamelCase_ : List[str] = 32 , UpperCamelCase_ : int = 64 , UpperCamelCase_ : Dict = 20 , UpperCamelCase_ : str = 768 , UpperCamelCase_ : Optional[Any]=77 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : Dict = 0.0 , UpperCamelCase_ : Optional[int] = "silu" , UpperCamelCase_ : List[Any] = None , UpperCamelCase_ : Dict = None , UpperCamelCase_ : Optional[int] = "linear" , UpperCamelCase_ : str = "prd" , UpperCamelCase_ : Dict = None , UpperCamelCase_ : Any = None , UpperCamelCase_ : Optional[int] = None , ) -> Dict:
"""simple docstring"""
super().__init__()
lowerCamelCase_ : Any = num_attention_heads
lowerCamelCase_ : Optional[int] = attention_head_dim
lowerCamelCase_ : Tuple = num_attention_heads * attention_head_dim
lowerCamelCase_ : Optional[Any] = additional_embeddings
lowerCamelCase_ : Union[str, Any] = time_embed_dim or inner_dim
lowerCamelCase_ : Union[str, Any] = embedding_proj_dim or embedding_dim
lowerCamelCase_ : Optional[int] = clip_embed_dim or embedding_dim
lowerCamelCase_ : int = Timesteps(_A , _A , 0 )
lowerCamelCase_ : int = TimestepEmbedding(_A , _A , out_dim=_A , act_fn=_A )
lowerCamelCase_ : List[Any] = nn.Linear(_A , _A )
if embedding_proj_norm_type is None:
lowerCamelCase_ : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
lowerCamelCase_ : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
lowerCamelCase_ : Tuple = nn.Linear(_A , _A )
if encoder_hid_proj_type is None:
lowerCamelCase_ : int = None
elif encoder_hid_proj_type == "linear":
lowerCamelCase_ : List[Any] = nn.Linear(_A , _A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
lowerCamelCase_ : Dict = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _A ) )
if added_emb_type == "prd":
lowerCamelCase_ : Dict = nn.Parameter(torch.zeros(1 , 1 , _A ) )
elif added_emb_type is None:
lowerCamelCase_ : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
lowerCamelCase_ : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A , _A , _A , dropout=_A , activation_fn='''gelu''' , attention_bias=_A , )
for d in range(_A )
] )
if norm_in_type == "layer":
lowerCamelCase_ : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
lowerCamelCase_ : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
lowerCamelCase_ : Union[str, Any] = nn.LayerNorm(_A )
lowerCamelCase_ : int = nn.Linear(_A , _A )
lowerCamelCase_ : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 )
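        # a -10000 bias strictly above the diagonal makes attention causal once added to the logits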
causal_attention_mask.triu_(1 )
lowerCamelCase_ : Tuple = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' , _A , persistent=_A )
lowerCamelCase_ : Tuple = nn.Parameter(torch.zeros(1 , _A ) )
lowerCamelCase_ : Dict = nn.Parameter(torch.zeros(1 , _A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : List[Any] = {}
def fn_recursive_add_processors(UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict ):
if hasattr(_A , '''set_processor''' ):
lowerCamelCase_ : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" , _A , _A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A , _A , _A )
return processors
def __UpperCamelCase ( self : Tuple , UpperCamelCase_ : Any ) -> Dict:
"""simple docstring"""
lowerCamelCase_ : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_A , _A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] ):
if hasattr(_A , '''set_processor''' ):
if not isinstance(_A , _A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" , _A , _A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A , _A , _A )
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
def __UpperCamelCase ( self : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : str = True , ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : str = hidden_states.shape[0]
lowerCamelCase_ : int = timestep
if not torch.is_tensor(_A ):
lowerCamelCase_ : str = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
lowerCamelCase_ : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCamelCase_ : Optional[int] = timesteps * torch.ones(_A , dtype=timesteps.dtype , device=timesteps.device )
lowerCamelCase_ : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
lowerCamelCase_ : Any = timesteps_projected.to(dtype=self.dtype )
lowerCamelCase_ : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
lowerCamelCase_ : int = self.embedding_proj_norm(_A )
lowerCamelCase_ : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
lowerCamelCase_ : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
lowerCamelCase_ : Any = self.proj_in(_A )
lowerCamelCase_ : Dict = self.positional_embedding.to(hidden_states.dtype )
lowerCamelCase_ : List[Any] = []
lowerCamelCase_ : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
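        # promote 2D (batch, dim) tensors to (batch, 1, dim) so they can be concatenated along the sequence axis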
if len(proj_embeddings.shape ) == 2:
lowerCamelCase_ : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
lowerCamelCase_ : Any = hidden_states[:, None, :]
lowerCamelCase_ : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
lowerCamelCase_ : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A , -1 , -1 )
additional_embeds.append(_A )
lowerCamelCase_ : List[str] = torch.cat(
_A , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
lowerCamelCase_ : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
lowerCamelCase_ : Any = F.pad(
_A , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
lowerCamelCase_ : int = hidden_states + positional_embeddings
if attention_mask is not None:
lowerCamelCase_ : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0
lowerCamelCase_ : Union[str, Any] = F.pad(_A , (0, self.additional_embeddings) , value=0.0 )
lowerCamelCase_ : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
lowerCamelCase_ : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
lowerCamelCase_ : Any = self.norm_in(_A )
for block in self.transformer_blocks:
lowerCamelCase_ : int = block(_A , attention_mask=_A )
lowerCamelCase_ : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
lowerCamelCase_ : Optional[int] = hidden_states[:, -1]
else:
lowerCamelCase_ : Any = hidden_states[:, additional_embeddings_len:]
lowerCamelCase_ : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def __UpperCamelCase ( self : List[Any] , UpperCamelCase_ : Optional[int] ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 716
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : Tuple = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[int] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 418
| 0
|
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__UpperCamelCase : Dict = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[Any]=None ):
# Recurse if needed
if "." in tensor_name:
lowerCAmelCase = tensor_name.split('.' )
for split in splits[:-1]:
lowerCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase )
if new_module is None:
raise ValueError(F'{module} has no attribute {split}.' )
lowerCAmelCase = new_module
lowerCAmelCase = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'{module} does not have a parameter or a buffer named {tensor_name}.' )
lowerCAmelCase = tensor_name in module._buffers
lowerCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(F'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
lowerCAmelCase = False
lowerCAmelCase = False
if is_buffer or not is_bitsandbytes_available():
lowerCAmelCase = False
lowerCAmelCase = False
else:
lowerCAmelCase = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
lowerCAmelCase = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
lowerCAmelCase = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
lowerCAmelCase = old_value.to(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase , torch.Tensor ):
lowerCAmelCase = value.to('cpu' )
if value.dtype == torch.inta:
lowerCAmelCase = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
lowerCAmelCase = torch.tensor(_UpperCAmelCase , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , _UpperCAmelCase ) and fpaa_statistics is None:
lowerCAmelCase = new_value.T
lowerCAmelCase = old_value.__dict__
if is_abit:
lowerCAmelCase = bnb.nn.IntaParams(_UpperCAmelCase , requires_grad=_UpperCAmelCase , **_UpperCAmelCase ).to(_UpperCAmelCase )
elif is_abit:
lowerCAmelCase = bnb.nn.Paramsabit(_UpperCAmelCase , requires_grad=_UpperCAmelCase , **_UpperCAmelCase ).to(_UpperCAmelCase )
lowerCAmelCase = new_value
if fpaa_statistics is not None:
setattr(module.weight , 'SCB' , fpaa_statistics.to(_UpperCAmelCase ) )
else:
if value is None:
lowerCAmelCase = old_value.to(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase , torch.Tensor ):
lowerCAmelCase = value.to(_UpperCAmelCase )
else:
lowerCAmelCase = torch.tensor(_UpperCAmelCase , device=_UpperCAmelCase )
if is_buffer:
lowerCAmelCase = new_value
else:
lowerCAmelCase = nn.Parameter(_UpperCAmelCase , requires_grad=old_value.requires_grad )
lowerCAmelCase = new_value
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Union[str, Any]=False ):
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase = []
current_key_name.append(_UpperCAmelCase )
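        # current_key_name now holds the dotted path down to this module; the skip-list below is matched against that full path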
if (isinstance(_UpperCAmelCase , nn.Linear ) or isinstance(_UpperCAmelCase , _UpperCAmelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(_UpperCAmelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCAmelCase ,lowerCAmelCase = module.weight.shape
else:
lowerCAmelCase = module.in_features
lowerCAmelCase = module.out_features
if quantization_config.quantization_method() == "llm_int8":
lowerCAmelCase = bnb.nn.LinearabitLt(
_UpperCAmelCase , _UpperCAmelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
lowerCAmelCase = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
lowerCAmelCase = bnb.nn.Linearabit(
_UpperCAmelCase , _UpperCAmelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
lowerCAmelCase = True
# Store the module class in case we need to transpose the weight later
lowerCAmelCase = type(_UpperCAmelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_UpperCAmelCase )
if len(list(module.children() ) ) > 0:
lowerCAmelCase ,lowerCAmelCase = _replace_with_bnb_linear(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , has_been_replaced=_UpperCAmelCase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None ):
lowerCAmelCase = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
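    # "lm_head" is excluded by default so the output projection stays in higher precision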
lowerCAmelCase ,lowerCAmelCase = _replace_with_bnb_linear(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _SCREAMING_SNAKE_CASE (*_UpperCAmelCase : List[Any] , **_UpperCAmelCase : str ):
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , _UpperCAmelCase , )
return replace_with_bnb_linear(*_UpperCAmelCase , **_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (*_UpperCAmelCase : List[Any] , **_UpperCAmelCase : Optional[int] ):
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , _UpperCAmelCase , )
return set_module_quantized_tensor_to_device(*_UpperCAmelCase , **_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
lowerCAmelCase = deepcopy(_UpperCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
lowerCAmelCase = find_tied_parameters(_UpperCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCAmelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCAmelCase = sum(_UpperCAmelCase , [] )
lowerCAmelCase = len(_UpperCAmelCase ) > 0
# Check if it is a base model
lowerCAmelCase = not hasattr(_UpperCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase = list(model.named_children() )
lowerCAmelCase = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase = set(_UpperCAmelCase ) - set(_UpperCAmelCase )
lowerCAmelCase = list(set(_UpperCAmelCase ) ) + list(_UpperCAmelCase )
# remove ".weight" from the keys
lowerCAmelCase = ['.weight', '.bias']
lowerCAmelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase = name.replace(_UpperCAmelCase , '' )
filtered_module_names.append(_UpperCAmelCase )
return filtered_module_names
| 4
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
SCREAMING_SNAKE_CASE: str = _symbol_database.Default()
SCREAMING_SNAKE_CASE: Dict = _descriptor_pool.Default().AddSerializedFile(
    b'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
SCREAMING_SNAKE_CASE: Optional[Any] = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
SCREAMING_SNAKE_CASE: List[Any] = None
SCREAMING_SNAKE_CASE: str = b'''H\003'''
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
SCREAMING_SNAKE_CASE: List[Any] = 4_5
SCREAMING_SNAKE_CASE: List[Any] = 1_5_8_1
SCREAMING_SNAKE_CASE: Union[str, Any] = 1_5_1_7
SCREAMING_SNAKE_CASE: Any = 1_5_7_0
SCREAMING_SNAKE_CASE: Union[str, Any] = 1_5_8_4
SCREAMING_SNAKE_CASE: str = 1_7_9_3
SCREAMING_SNAKE_CASE: List[Any] = 1_7_9_5
SCREAMING_SNAKE_CASE: Any = 1_9_1_6
SCREAMING_SNAKE_CASE: Optional[Any] = 1_8_6_4
SCREAMING_SNAKE_CASE: Union[str, Any] = 1_9_0_5
SCREAMING_SNAKE_CASE: str = 1_9_1_9
SCREAMING_SNAKE_CASE: Any = 2_4_2_9
SCREAMING_SNAKE_CASE: Dict = 2_2_0_8
SCREAMING_SNAKE_CASE: Optional[Any] = 2_4_1_8
SCREAMING_SNAKE_CASE: Optional[Any] = 2_3_2_3
SCREAMING_SNAKE_CASE: Tuple = 2_4_0_7
# @@protoc_insertion_point(module_scope)
| 360
| 0
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : Optional[int] = "https://openaipublic.azureedge.net/jukebox/models/"
_lowercase : List[Any] = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def _lowerCAmelCase ( UpperCamelCase__: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
A = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
A = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
A = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
A = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
A = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
if "prime_prior" in key:
A = key.replace("""prime_prior""" , """encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
A = key.replace(""".emb.""" , """.""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" , """.codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" , """metadata_embedding.""" )
if "x_emb.emb." in key:
A = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""" )
return key
def _lowerCAmelCase ( UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] ) -> Optional[int]:
"""simple docstring"""
A = {}
import re
A = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
A = re.compile(
r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
A = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
A = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
A = re.compile(
r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
A = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
A = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
A = re.compile(
r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
A = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(UpperCamelCase__ ):
A = re_encoder_block_conv_in.match(UpperCamelCase__ )
A = regex_match.groups()
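# flatten the nested (model index, sub-layer index) pair into a single downsample_block index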
A = int(groups[2] ) * 2 + int(groups[3] )
A = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
A = re_encoder_block_conv_in.sub(UpperCamelCase__ , UpperCamelCase__ )
elif re_encoder_block_resnet.fullmatch(UpperCamelCase__ ):
A = re_encoder_block_resnet.match(UpperCamelCase__ )
A = regex_match.groups()
A = int(groups[2] ) * 2 + int(groups[3] )
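# within a resnet block, sub-module 1 holds conv1d_1 and sub-module 3 holds conv1d_2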
A = {"""1""": 1, """3""": 2}[groups[-2]]
A = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
A = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
A = prefix + resnet_block
A = re_encoder_block_resnet.sub(UpperCamelCase__ , UpperCamelCase__ )
elif re_encoder_block_proj_out.fullmatch(UpperCamelCase__ ):
A = re_encoder_block_proj_out.match(UpperCamelCase__ )
A = regex_match.groups()
A = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
A = re_encoder_block_proj_out.sub(UpperCamelCase__ , UpperCamelCase__ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(UpperCamelCase__ ):
A = re_decoder_block_conv_out.match(UpperCamelCase__ )
A = regex_match.groups()
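# the decoder numbering includes two leading non-upsample layers, hence the -2 offset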
A = int(groups[2] ) * 2 + int(groups[3] ) - 2
A = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
A = re_decoder_block_conv_out.sub(UpperCamelCase__ , UpperCamelCase__ )
elif re_decoder_block_resnet.fullmatch(UpperCamelCase__ ):
A = re_decoder_block_resnet.match(UpperCamelCase__ )
A = regex_match.groups()
A = int(groups[2] ) * 2 + int(groups[3] ) - 2
A = {"""1""": 1, """3""": 2}[groups[-2]]
A = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
A = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
A = prefix + resnet_block
A = re_decoder_block_resnet.sub(UpperCamelCase__ , UpperCamelCase__ )
elif re_decoder_block_proj_in.fullmatch(UpperCamelCase__ ):
A = re_decoder_block_proj_in.match(UpperCamelCase__ )
A = regex_match.groups()
A = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
A = re_decoder_block_proj_in.sub(UpperCamelCase__ , UpperCamelCase__ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(UpperCamelCase__ ):
A = re_prior_cond_conv_out.match(UpperCamelCase__ )
A = regex_match.groups()
A = int(groups[1] ) * 2 + int(groups[2] ) - 2
A = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
A = re_prior_cond_conv_out.sub(UpperCamelCase__ , UpperCamelCase__ )
elif re_prior_cond_resnet.fullmatch(UpperCamelCase__ ):
A = re_prior_cond_resnet.match(UpperCamelCase__ )
A = regex_match.groups()
A = int(groups[1] ) * 2 + int(groups[2] ) - 2
A = {"""1""": 1, """3""": 2}[groups[-2]]
A = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
A = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
A = prefix + resnet_block
A = re_prior_cond_resnet.sub(UpperCamelCase__ , UpperCamelCase__ )
elif re_prior_cond_proj_in.fullmatch(UpperCamelCase__ ):
A = re_prior_cond_proj_in.match(UpperCamelCase__ )
A = regex_match.groups()
A = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
A = re_prior_cond_proj_in.sub(UpperCamelCase__ , UpperCamelCase__ )
# keep original key
else:
A = original_key
A = replace_key(UpperCamelCase__ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
# handle mismatched shapes
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
A = model_state_dict[f'{key_prefix}.{key}']
print(f'{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match' )
A = original_key
A = original_key
A = value
return new_dict
@torch.no_grad()
def _lowerCAmelCase ( UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: Union[str, Any]=None ) -> Any:
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
A = requests.get(f'{PREFIX}{file}' , allow_redirects=UpperCamelCase__ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=UpperCamelCase__ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , """wb""" ).write(r.content )
A = MODEL_MAPPING[model_name.split("""/""" )[-1]]
A = JukeboxConfig.from_pretrained(UpperCamelCase__ )
A = JukeboxModel(UpperCamelCase__ )
A = []
A = {}
for i, dict_name in enumerate(UpperCamelCase__ ):
A = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )["""model"""]
A = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
A = old_dic[k]
elif k.endswith(""".w""" ):
A = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
A = old_dic[k]
else:
A = old_dic[k]
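# checkpoint 0 is the vqvae; the remaining checkpoints are priors stored in reverse order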
A = """vqvae""" if i == 0 else f'priors.{3 - i}'
A = fix_jukebox_keys(UpperCamelCase__ , model.state_dict() , UpperCamelCase__ , UpperCamelCase__ )
weight_dict.append(UpperCamelCase__ )
A = weight_dict.pop(0 )
model.vqvae.load_state_dict(UpperCamelCase__ )
for i in range(len(UpperCamelCase__ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , """w""" ) as txtfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase__ )
return weight_dict
if __name__ == "__main__":
_lowercase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_lowercase : Optional[Any] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 546
|
from __future__ import annotations
import requests
def _lowerCAmelCase ( UpperCamelCase__: str ) -> dict:
"""simple docstring"""
A = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(UpperCamelCase__ ).json()
def _lowerCAmelCase ( UpperCamelCase__: int = 10 ) -> list[dict]:
"""simple docstring"""
A = """https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"""
A = requests.get(UpperCamelCase__ ).json()[:max_stories]
return [get_hackernews_story(UpperCamelCase__ ) for story_id in story_ids]
def _lowerCAmelCase ( UpperCamelCase__: int = 10 ) -> str:
"""simple docstring"""
A = hackernews_top_stories(UpperCamelCase__ )
return "\n".join("""* [{title}]({url})""".format(**UpperCamelCase__ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 546
| 1
|
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class UpperCAmelCase ( __lowerCamelCase ):
__lowercase = "M-CLIP"
def __init__( self :Dict , lowercase_ :Any=10_24 , lowercase_ :Union[str, Any]=7_68 , **lowercase_ :List[Any] )-> Union[str, Any]:
A__ = transformerDimSize
A__ = imageDimSize
super().__init__(**lowercase_ )
class UpperCAmelCase ( __lowerCamelCase ):
__lowercase = MCLIPConfig
def __init__( self :Union[str, Any] , lowercase_ :List[str] , *lowercase_ :List[str] , **lowercase_ :List[Any] )-> Optional[int]:
super().__init__(lowercase_ , *lowercase_ , **lowercase_ )
A__ = XLMRobertaModel(lowercase_ )
A__ = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :str , lowercase_ :Optional[Any] )-> List[str]:
A__ = self.transformer(input_ids=lowercase_ , attention_mask=lowercase_ )[0]
A__ = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(lowercase_ ), embs
| 440
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
a : Union[List[PIL.Image.Image], np.ndarray]
a : Optional[List[bool]]
a : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 253
| 0
|
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
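# Pancake sort: flip the largest unsorted element to the front, then flip it into its final position.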
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ )
while cur > 1:
# Find the maximum number in arr
__UpperCamelCase =arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
__UpperCamelCase =arr[mi::-1] + arr[mi + 1 : len(SCREAMING_SNAKE_CASE__ )]
# Reverse whole list
__UpperCamelCase =arr[cur - 1 :: -1] + arr[cur : len(SCREAMING_SNAKE_CASE__ )]
cur -= 1
return arr
if __name__ == "__main__":
_A = input('Enter numbers separated by a comma:\n').strip()
_A = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 682
|
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : LevitConfig , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : bool = True ):
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__UpperCamelCase =timm.create_model('levit_128s' , pretrained=SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =timm.create_model('levit_128' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 1_92:
__UpperCamelCase =timm.create_model('levit_192' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 2_56:
__UpperCamelCase =timm.create_model('levit_256' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 3_84:
__UpperCamelCase =timm.create_model('levit_384' , pretrained=SCREAMING_SNAKE_CASE__ )
from_model.eval()
__UpperCamelCase =LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE__ ).eval()
__UpperCamelCase =OrderedDict()
__UpperCamelCase =from_model.state_dict()
__UpperCamelCase =list(from_model.state_dict().keys() )
__UpperCamelCase =list(our_model.state_dict().keys() )
print(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
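# copy weights positionally; this assumes both state dicts enumerate parameters in the same order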
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =weights[og_keys[i]]
our_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =torch.randn((2, 3, 2_24, 2_24) )
__UpperCamelCase =from_model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =our_model(SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "The model logits don't match the original one."
__UpperCamelCase =name
print(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__UpperCamelCase =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True ):
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =10_00
__UpperCamelCase =(1, num_labels)
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =num_labels
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
__UpperCamelCase ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , names_to_config[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 682
| 1
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__a :Any = logging.getLogger(__name__)
@dataclass(frozen=snake_case_ )
class _a :
"""simple docstring"""
_lowerCamelCase : str
_lowerCamelCase : str
_lowerCamelCase : Optional[str] = None
_lowerCamelCase : Optional[str] = None
_lowerCamelCase : Optional[str] = None
@dataclass(frozen=snake_case_ )
class _a :
"""simple docstring"""
_lowerCamelCase : List[int]
_lowerCamelCase : Optional[List[int]] = None
_lowerCamelCase : Optional[List[int]] = None
_lowerCamelCase : Optional[Union[int, float]] = None
_lowerCamelCase : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[InputFeatures]
def __init__( self : str , UpperCAmelCase : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : str , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : str=False , UpperCAmelCase : bool = False , ):
A_ = hans_processors[task]()
A_ = os.path.join(
UpperCAmelCase , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(UpperCAmelCase ) , UpperCAmelCase , ) , )
A_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ , A_ = label_list[2], label_list[1]
A_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ = cached_features_file + ".lock"
with FileLock(UpperCAmelCase ):
if os.path.exists(UpperCAmelCase ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
A_ = torch.load(UpperCAmelCase )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
A_ = (
processor.get_dev_examples(UpperCAmelCase ) if evaluate else processor.get_train_examples(UpperCAmelCase )
)
logger.info("Training examples: %s" , len(UpperCAmelCase ) )
A_ = hans_convert_examples_to_features(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
logger.info("Saving features into cached file %s" , UpperCAmelCase )
torch.save(self.features , UpperCAmelCase )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : int , UpperCAmelCase : Optional[Any] ):
return self.features[i]
def __A ( self : int ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class _a :
"""simple docstring"""
_lowerCamelCase : List[InputFeatures]
def __init__( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : str , UpperCAmelCase : Optional[int] = 128 , UpperCAmelCase : int=False , UpperCAmelCase : bool = False , ):
A_ = hans_processors[task]()
A_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ , A_ = label_list[2], label_list[1]
A_ = label_list
A_ = processor.get_dev_examples(UpperCAmelCase ) if evaluate else processor.get_train_examples(UpperCAmelCase )
A_ = hans_convert_examples_to_features(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(UpperCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
A_ = tf.data.Dataset.from_generator(
UpperCAmelCase , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def __A ( self : List[Any] ):
return self.dataset
def __len__( self : List[Any] ):
return len(self.features )
def __getitem__( self : Any , UpperCAmelCase : str ):
return self.features[i]
def __A ( self : str ):
return self.label_list
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : Union[str, Any] , UpperCAmelCase : Optional[int] ):
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase , "heuristics_train_set.txt" ) ) , "train" )
def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] ):
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase , "heuristics_evaluation_set.txt" ) ) , "dev" )
def __A ( self : Union[str, Any] ):
return ["contradiction", "entailment", "neutral"]
def __A ( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : str ):
A_ = []
for i, line in enumerate(UpperCAmelCase ):
if i == 0:
continue
A_ = "%s-%s" % (set_type, line[0])
A_ = line[5]
A_ = line[6]
A_ = line[7][2:] if line[7].startswith("ex" ) else line[7]
A_ = line[0]
examples.append(InputExample(guid=UpperCAmelCase , text_a=UpperCAmelCase , text_b=UpperCAmelCase , label=UpperCAmelCase , pairID=UpperCAmelCase ) )
return examples
def __snake_case ( __UpperCamelCase : List[InputExample] ,__UpperCamelCase : List[str] ,__UpperCamelCase : int ,__UpperCamelCase : PreTrainedTokenizer ,):
"""simple docstring"""
A_ = {label: i for i, label in enumerate(__UpperCamelCase )}
A_ = []
for ex_index, example in tqdm.tqdm(enumerate(__UpperCamelCase ) ,desc="convert examples to features" ):
if ex_index % 1_0000 == 0:
logger.info("Writing example %d" % (ex_index) )
A_ = tokenizer(
example.text_a ,example.text_b ,add_special_tokens=__UpperCamelCase ,max_length=__UpperCamelCase ,padding="max_length" ,truncation=__UpperCamelCase ,return_overflowing_tokens=__UpperCamelCase ,)
A_ = label_map[example.label] if example.label in label_map else 0
A_ = int(example.pairID )
features.append(InputFeatures(**__UpperCamelCase ,label=__UpperCamelCase ,pairID=__UpperCamelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info("*** Example ***" )
logger.info(f'''guid: {example}''' )
logger.info(f'''features: {features[i]}''' )
return features
__a :Union[str, Any] = {
'hans': 3,
}
__a :Any = {
'hans': HansProcessor,
}
| 86
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = tempfile.mkdtemp()
_A = BlipImageProcessor()
_A = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
_A = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
_A = InstructBlipProcessor(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).tokenizer
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Optional[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).image_processor
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).qformer_tokenizer
def UpperCAmelCase ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_A = [Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self ) -> List[Any]:
_A = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
_A = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_A = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 )
_A = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase_ )
self.assertIsInstance(processor.qformer_tokenizer , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = self.prepare_image_inputs()
_A = image_processor(lowerCAmelCase_ , return_tensors="""np""" )
_A = processor(images=lowerCAmelCase_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = """lower newer"""
_A = processor(text=lowerCAmelCase_ )
_A = tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_A = qformer_tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def UpperCAmelCase ( self ) -> Any:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = """lower newer"""
_A = self.prepare_image_inputs()
_A = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def UpperCAmelCase ( self ) -> Dict:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A = processor.batch_decode(lowerCAmelCase_ )
_A = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = """lower newer"""
_A = self.prepare_image_inputs()
_A = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 401
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _lowercase ( unittest.TestCase ):
def lowerCAmelCase__ ( self ):
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__magic_name__ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__magic_name__ = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self ):
__magic_name__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__magic_name__ = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self ):
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_rust_tokenizer()
__magic_name__ = self.get_image_processor()
__magic_name__ = AlignProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
__magic_name__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ )
__magic_name__ = AlignProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
__magic_name__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
__magic_name__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__magic_name__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__magic_name__ = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 )
__magic_name__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = AlignProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__magic_name__ = self.prepare_image_inputs()
__magic_name__ = image_processor(UpperCamelCase_ , return_tensors='''np''' )
__magic_name__ = processor(images=UpperCamelCase_ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase__ ( self ):
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = AlignProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__magic_name__ = '''lower newer'''
__magic_name__ = processor(text=UpperCamelCase_ )
__magic_name__ = tokenizer(UpperCamelCase_ , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self ):
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = AlignProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__magic_name__ = '''lower newer'''
__magic_name__ = self.prepare_image_inputs()
__magic_name__ = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self ):
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = AlignProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__magic_name__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__magic_name__ = processor.batch_decode(UpperCamelCase_ )
__magic_name__ = tokenizer.batch_decode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = AlignProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__magic_name__ = '''lower newer'''
__magic_name__ = self.prepare_image_inputs()
__magic_name__ = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 190
|
"""simple docstring"""
from pathlib import Path
import fire
def lowercase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
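# copy the first n lines of every file in src_dir into a same-named file in dest_dir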
__magic_name__ = Path(__UpperCamelCase )
__magic_name__ = Path(__UpperCamelCase )
dest_dir.mkdir(exist_ok=__UpperCamelCase )
for path in src_dir.iterdir():
__magic_name__ = [x.rstrip() for x in list(path.open().readlines() )][:n]
__magic_name__ = dest_dir.joinpath(path.name )
print(__UpperCamelCase )
dest_path.open('''w''' ).write('''\n'''.join(__UpperCamelCase ) )
if __name__ == "__main__":
fire.Fire(minify)
| 190
| 1
|
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=30 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=10 , snake_case_=0.02 , ):
_A = parent
_A = batch_size
_A = image_size
_A = patch_size
_A = num_channels
_A = is_training
_A = use_labels
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = type_sequence_label_size
_A = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A = (image_size // patch_size) ** 2
_A = num_patches + 1
def lowerCAmelCase__ ( self ):
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, pixel_values
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ):
_A = FlaxViTModel(config=snake_case_ )
_A = model(snake_case_ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_A = (self.image_size, self.image_size)
_A = (self.patch_size, self.patch_size)
_A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ):
_A = self.type_sequence_label_size
_A = FlaxViTForImageClassification(config=snake_case_ )
_A = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A = 1
_A = FlaxViTForImageClassification(snake_case_ )
_A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A = model(snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.prepare_config_and_inputs()
_A, _A = config_and_inputs
_A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class lowerCamelCase( __snake_case , unittest.TestCase ):
'''simple docstring'''
__magic_name__ = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def lowerCAmelCase__ ( self ):
_A = FlaxViTModelTester(self )
_A = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A, _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(snake_case_ )
_A = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowerCAmelCase__ ( self ):
_A, _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A = self._prepare_for_class(snake_case_ , snake_case_ )
_A = model_class(snake_case_ )
@jax.jit
def model_jitted(snake_case_ , **snake_case_ ):
return model(pixel_values=snake_case_ , **snake_case_ )
with self.subTest('JIT Enabled' ):
_A = model_jitted(**snake_case_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_A = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCAmelCase__ ( self ):
for model_class_name in self.all_model_classes:
_A = model_class_name.from_pretrained('google/vit-base-patch16-224' )
_A = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(snake_case_ )
| 27
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Tuple = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Any = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 457
| 0
|
"""simple docstring"""
from math import loga
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif not isinstance(__UpperCamelCase , int ):
raise TypeError('''Input value must be an \'int\' type''' )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
__A = original_name.split('''.''' )[0]
__A = key.split('''.''' )
__A = int(key_list[key_list.index(__UpperCamelCase ) - 2] )
__A = int(key_list[key_list.index(__UpperCamelCase ) - 1] )
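# patch-embedding layers share the numbering with blocks, so subtract the running offset to re-index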
__A = orig_block_num - offset
__A = key.replace(f'{orig_block_num}.{layer_num}.{original_name}' , f'block.{new_block_num}.{layer_num}.{new_name}' )
return key
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
__A = OrderedDict()
__A , __A = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
__A = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
__A = key[: key.find('''proj''' )]
__A = key.replace(__UpperCamelCase , f'patch_embeddings.{total_embed_found}.' )
__A = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
__A = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
__A = replace_key_with_offset(__UpperCamelCase , __UpperCamelCase , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
__A = replace_key_with_offset(__UpperCamelCase , __UpperCamelCase , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
__A = replace_key_with_offset(__UpperCamelCase , __UpperCamelCase , '''norm1''' , '''before_norm''' )
if "norm2" in key:
__A = replace_key_with_offset(__UpperCamelCase , __UpperCamelCase , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
__A = replace_key_with_offset(__UpperCamelCase , __UpperCamelCase , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
__A = replace_key_with_offset(__UpperCamelCase , __UpperCamelCase , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
__A = key.replace('''head''' , '''classifier''' )
__A = value
return new_state_dict
def lowerCAmelCase ( ):
"""simple docstring"""
__A = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__A = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return image
@torch.no_grad()
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
__A = PoolFormerConfig()
# set attributes based on model_name
__A = '''huggingface/label-files'''
__A = model_name[-3:]
__A = 1_0_0_0
__A = '''imagenet-1k-id2label.json'''
__A = (1, 1_0_0_0)
# set config attributes
__A = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
__A = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
__A = idalabel
__A = {v: k for k, v in idalabel.items()}
if size == "s12":
__A = [2, 2, 6, 2]
__A = [6_4, 1_2_8, 3_2_0, 5_1_2]
__A = 4.0
__A = 0.9
elif size == "s24":
__A = [4, 4, 1_2, 4]
__A = [6_4, 1_2_8, 3_2_0, 5_1_2]
__A = 4.0
__A = 0.9
elif size == "s36":
__A = [6, 6, 1_8, 6]
__A = [6_4, 1_2_8, 3_2_0, 5_1_2]
__A = 4.0
__A = 1e-6
__A = 0.9
elif size == "m36":
__A = [6, 6, 1_8, 6]
__A = [9_6, 1_9_2, 3_8_4, 7_6_8]
__A = 4.0
__A = 1e-6
__A = 0.95
elif size == "m48":
__A = [8, 8, 2_4, 8]
__A = [9_6, 1_9_2, 3_8_4, 7_6_8]
__A = 4.0
__A = 1e-6
__A = 0.95
else:
raise ValueError(f'Size {size} not supported' )
# load image processor
__A = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
# Prepare image
__A = prepare_img()
__A = image_processor(images=__UpperCamelCase , return_tensors='''pt''' ).pixel_values
logger.info(f'Converting model {model_name}...' )
# load original state dict
__A = torch.load(__UpperCamelCase , map_location=torch.device('''cpu''' ) )
# rename keys
__A = rename_keys(__UpperCamelCase )
# create HuggingFace model and load state dict
__A = PoolFormerForImageClassification(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
# Define image processor
__A = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
__A = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
# forward pass
__A = model(__UpperCamelCase )
__A = outputs.logits
# define expected logit slices for different models
if size == "s12":
__A = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
__A = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
__A = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
__A = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
__A = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(f'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , __UpperCamelCase , atol=1e-2 )
# finally, save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
lowercase_ = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 215
| 0
|
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class __A :
def __init__( self , UpperCamelCase_ = "cpu" , UpperCamelCase_ = "openai/clip-vit-large-patch14" ):
__UpperCAmelCase : Union[str, Any] = device
__UpperCAmelCase : List[Any] = CLIPTokenizerFast.from_pretrained(UpperCamelCase_ )
__UpperCAmelCase : int = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
__UpperCAmelCase : Any = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
__UpperCAmelCase : Tuple = torchvision.transforms.Normalize(self.image_mean , self.image_std )
__UpperCAmelCase : Optional[int] = torchvision.transforms.Resize(2_24 )
__UpperCAmelCase : Dict = torchvision.transforms.CenterCrop(2_24 )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[int] = self.resize(UpperCamelCase_ )
__UpperCAmelCase : int = self.center_crop(UpperCamelCase_ )
__UpperCAmelCase : List[str] = self.normalize(UpperCamelCase_ )
return images
def __call__( self , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ ):
__UpperCAmelCase : Tuple = self.tokenizer(text=UpperCamelCase_ , **UpperCamelCase_ )
__UpperCAmelCase : int = self.preprocess_img(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ):
        """Instantiate a VQGAN-CLIP editor. Pass a custom VQGAN model as `vqgan`, or a config/checkpoint path pair."""
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Stitch the intermediate images saved during generation into a gif."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a transform vector to the base latent and decode the result."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *unused = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Run the CLIP-guided latent optimization, showing/saving intermediate and final images."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
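# A minimal, hedged usage sketch for the class above. The prompt strings, iteration count,
# and learning rate are illustrative assumptions, not values taken from this file.
if __name__ == "__main__":
    editor = VQGAN_CLIP(iterations=20, lr=0.03)
    editor.generate(
        pos_prompts="a smiling face | bright lighting:0.5",  # "|" separates prompts, ":" attaches a weight
        neg_prompts="blurry",
        save_intermediate=True,
        show_intermediate=False,
    )
    editor.make_animation()  # stitches the saved intermediate frames into ./animation.gif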
| 168
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
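# Hedged usage example for the subcommand above (assumption: it is exposed through the
# `transformers-cli` entry point; the model name is an arbitrary example):
#
#   transformers-cli download --cache-dir ./models --force bert-base-uncased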
| 168
| 1
|
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including num."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p starting at p*p as composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
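# A quick sanity check of the sieve above, using values that are easy to verify by hand:
#
#   assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
#   assert prime_sieve_eratosthenes(2) == [2]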
| 18
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(digit_len: int = 2) -> int:
    """Multiply the digit-cancelling fractions together and return the denominator in lowest terms."""
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
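# For reference, the well-known results these functions reproduce: the four non-trivial
# two-digit digit-cancelling fractions, whose product is 1/100.
#
#   assert fraction_list(2) == ["16/64", "19/95", "26/65", "49/98"]
#   assert solution() == 100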
| 18
| 1
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from the provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the transformers-cli."""
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """Tokenize the provided input and optionally return the corresponding token ids."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """Detokenize the provided token ids into readable text."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """Run the pipeline forward pass on the provided inputs."""
        # Check we don't have an empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
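# With the server running, the routes registered above can be exercised over HTTP. The task
# name and the JSON payload shapes below are assumptions consistent with the
# `Body(..., embed=True)` parameters, not documented invocations:
#
#   transformers-cli serve --task sentiment-analysis --host localhost --port 8888
#   curl -X POST http://localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "Hello world", "return_ids": true}'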
| 271
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset([file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1

            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
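# Hedged launch example for the training script above (the script filename and the dataset
# path are placeholder assumptions):
#
#   accelerate launch cv_example.py --data_dir /path/to/pets/images \
#       --with_tracking --checkpointing_steps epoch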
| 271
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
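# The parser above is normally reached through the accelerate entry point, e.g.:
#
#   accelerate config --config_file ./my_config.yaml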
| 196
|
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with additional arguments used for pruning (masking) heads and weights."""

    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
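# A minimal instantiation sketch for the config above; the non-default values are
# arbitrary examples, not values from the original file:
#
#   config = MaskedBertConfig(pruning_method="topK", mask_scale=0.0)
#   print(config.model_type)  # "masked_bert"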
| 196
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    # Copy every other teacher layer into the student, remapping the parameter names
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
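# Hedged invocation example for the script above (the script filename is an assumption;
# the dump path is the script's own default):
#
#   python extract.py --model_type bert --model_name bert-base-uncased \
#       --dump_checkpoint serialization_dir/tf_bert-base-uncased_0247911.pth --vocab_transform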
| 660
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}


def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji


class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """
    Tokenizer for GPT-NeoX Japanese, based on a sub-word tokenizer that handles character
    fluctuations unique to Japanese (kana/kanji variants, emoji, and special whitespace tokens).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds model inputs from a conversation, truncating from the left when too long."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file


class SubWordJapaneseTokenizer(object):
    """Sub-word tokenizer ported from https://github.com/tanreinama/Japanese-BPEEncoder_V2 (MIT license)."""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
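# A hedged usage sketch for the tokenizer above, using the checkpoint named in this file's
# pretrained map (the example text is arbitrary):
#
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("こんにちは、世界")["input_ids"]
#   print(tokenizer.decode(ids))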
| 660
| 1
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    """Configuration class for a Mask2Former model. Currently, Mask2Former only supports the Swin backbone."""

    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a config from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
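# A minimal sketch of building the config above with its default Swin backbone
# (all values are the defaults defined in __init__):
#
#   config = Mask2FormerConfig()
#   print(config.backbone_config.model_type)  # "swin"
#   print(config.num_queries)  # 100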
| 714
|
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}


class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
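# A hedged sketch of the truncation behaviour implemented above. The regex list passed to
# `truncate_before_pattern` is an illustrative assumption, not a documented default:
#
#   tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tokenizer("def hello():\n    print('hi')\n\nprint('done')")["input_ids"]
#   # decode() also cuts the completion at the second top-level `print`/`def` it finds
#   text = tokenizer.decode(ids, truncate_before_pattern=[r"\n\n\n"])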
| 269
| 0
|
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _UpperCamelCase ( ):
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
UpperCAmelCase__ : str = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , UpperCamelCase_ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
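# Summary of the API exercised above, grounded in the tests themselves:
# patch_submodule(module, "dotted.target", replacement) works both as a context
# manager and via explicit .start()/.stop() calls, and it also patches renamed
# aliases of the target that live in `module`'s globals.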
| 407
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """May raise json.decoder.JSONDecodeError if the page payload changes."""
    data = script.contents[0]
    info = json.loads(data[data.find("{\"config\"") : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username) -> None:
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()
    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self )-> str:
'''simple docstring'''
return F"{self.__class__.__name__}('{self.username}')"
def __str__( self )-> str:
'''simple docstring'''
return F"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]
    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]
    @property
    def biography(self) -> str:
        return self.user_data["biography"]
    @property
    def email(self) -> str:
        return self.user_data["business_email"]
    @property
    def website(self) -> str:
        return self.user_data["external_url"]
    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]
    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    """Smoke test for InstagramUser; skipped on CI."""
    import os
    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F"{instagram_user.number_of_posts = }")
print(F"{instagram_user.number_of_followers = }")
print(F"{instagram_user.number_of_followings = }")
print(F"{instagram_user.email = }")
print(F"{instagram_user.website = }")
print(F"{instagram_user.profile_picture_url = }")
print(F"{instagram_user.is_verified = }")
print(F"{instagram_user.is_private = }")
| 339
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
@property
    def gpu_provider(self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
return options
    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=generator, output_type="np", )
        image = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 708
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()
class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
    def run(self):
        hub_version = huggingface_hub.__version__
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = "not installed"
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = "not installed"
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info
    @staticmethod
    def format_dict(d) -> str:
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
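# Usage note (assuming the standard diffusers CLI entry point is installed):
# `diffusers-cli env` dispatches to EnvironmentCommand.run() and prints the
# report constructed above.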
| 202
| 0
|
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )
def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter():
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()
def test_median_filter():
    assert med.median_filter(gray, 3).any()
def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
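# Note: these functions follow pytest naming conventions, so a typical local run
# would be `pytest` from the repository root (the paths above assume the
# digital_image_processing image fixtures are present).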
| 18
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps an image processor and a tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs, )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        # This method forwards all its arguments to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        # This method forwards all its arguments to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
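# Illustrative call (hypothetical inputs): with `image` a PIL.Image and
# `processor = FlavaProcessor.from_pretrained(...)`, calling
# `processor(images=image, text="a photo of a cat", return_tensors="pt")`
# returns a single BatchEncoding merging tokenizer and image-processor outputs.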
| 309
| 0
|
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")
    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2) )
        policy_document = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2), )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F"role {role_name} already exists. Using existing one" )
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?", ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "], int, )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region
    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?", ["Provide IAM Role name", "Create new IAM role using credentials"], int, )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)
    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())
    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ", lambda x: str(x).lower(), )
    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ", lambda x: str(x).lower(), )
    distributed_type = _ask_options(
        "What is the distributed mode?", ["No distributed training", "Data parallelism"], _convert_sagemaker_distributed_mode, )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?", TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default="default", )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)] )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ", int, default=1, )
    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?", ["no", "fp16", "bf16", "fp8"], _convert_mixed_precision, )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
    return SageMakerConfig(
        image_uri=docker_image, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=distributed_type, use_cpu=False, dynamo_config=dynamo_config, ec2_instance_type=ec2_instance_type, profile=aws_profile, region=aws_region, iam_role_name=iam_role_name, mixed_precision=mixed_precision, num_machines=num_machines, sagemaker_inputs_file=sagemaker_inputs_file, sagemaker_metrics_file=sagemaker_metrics_file, )
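# Context note: this interactive flow is what backs `accelerate config` when the
# Amazon SageMaker compute environment is chosen; `accelerate launch` later
# consumes the returned SageMakerConfig.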
| 720
|
'''simple docstring'''
import random
def _partition(data, pivot) -> tuple:
    """Three-way partition of `data` around `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items, index):
    """Return the element that would sit at position `index` in sorted(items)."""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
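if __name__ == "__main__":
    # Illustrative usage (hypothetical data): sorted([2, 4, 5, 7, 899, 54, 32])
    # is [2, 4, 5, 7, 32, 54, 899], so index 2 selects 5.
    print(quick_select([2, 4, 5, 7, 899, 54, 32], 2))  # 5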
| 593
| 0
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)
    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)
    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]
            self.assertEqual(output_str, expected_output)
| 51
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
@cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35_378, 6_661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
        original_tokenizer_encodings = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase : Optional[Any] = {'''input_ids''': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = lowercase
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="xlm-roberta-base", revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3", )
| 217
| 0
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 1_28}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ = None) -> Tuple[str]:
'''simple docstring'''
lowercase__: Union[str, Any] = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_)
return tuple(UpperCAmelCase_)
def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ = None) -> List[int]:
'''simple docstring'''
lowercase__: str = [self.sep_token_id]
lowercase__: Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ = None) -> str:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def __lowercase ( self , UpperCAmelCase_) -> List[int]:
'''simple docstring'''
lowercase__: Any = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text)
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase_)
lowercase__: List[str] = " ".join(UpperCAmelCase_)
lowercase__: Any = self.encode(UpperCAmelCase_)
if len(UpperCAmelCase_) > self.model_max_length:
lowercase__: Union[str, Any] = input_ids[-self.model_max_length :]
logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""")
return input_ids
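

# --- Hypothetical usage sketch (not part of the original file): loads the fast
# tokenizer from a public checkpoint; the checkpoint name is illustrative. ---
if __name__ == "__main__":
    from transformers import BlenderbotTokenizerFast

    tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-400M-distill")
    encoded = tokenizer("Sample conversational input.")
    print(encoded["input_ids"])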
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
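

# --- Hypothetical standalone sketch (not part of the original module): a
# minimal version of the lazy-import pattern used above. Attribute access on
# the module object triggers the real import on first use; the class name and
# structure here are illustrative, not the transformers implementation. ---
import importlib
import types


class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._object_to_module = {
            obj: mod for mod, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, attr):
        module_name = self._object_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value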
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` using breadth-first search."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on the shortest path between `start` and `target`."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
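

# --- Hypothetical efficiency sketch (not part of the original file): list.pop(0)
# is O(n), so for larger graphs a collections.deque gives O(1) pops from the
# left. Same algorithm, illustrative function name. ---
from collections import deque


def bfs_shortest_path_deque(graph: dict, start, goal) -> list:
    if start == goal:
        return [start]
    explored = set()
    queue = deque([[start]])
    while queue:
        path = queue.popleft()  # O(1) instead of O(n)
        node = path[-1]
        if node in explored:
            continue
        for neighbour in graph[node]:
            new_path = path + [neighbour]
            if neighbour == goal:
                return new_path
            queue.append(new_path)
        explored.add(node)
    return []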
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by removing exactly one digit from `num`."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(transposition)) for transposition in num_transpositions
    )


if __name__ == "__main__":
    __import__("doctest").testmod()
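

# --- Hypothetical sanity checks (not part of the original file): 152 yields
# the candidates {52, 12, 15}, and abs() drops the sign of -290 first. ---
if __name__ == "__main__":
    assert remove_digit(152) == 52
    assert remove_digit(-290) == 90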
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
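

# --- Hypothetical illustration (not part of the original file): why conv
# weights are transposed with (2, 3, 1, 0). PyTorch stores conv kernels as
# (out_channels, in_channels, H, W) while Flax expects (H, W, in, out). ---
import numpy as np

pt_kernel = np.zeros((8, 3, 5, 5))           # (out_channels, in_channels, H, W)
flax_kernel = pt_kernel.transpose(2, 3, 1, 0)
assert flax_kernel.shape == (5, 5, 3, 8)     # (H, W, in_channels, out_channels)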
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
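

# --- Hypothetical usage sketch (not part of the original file): exercises the
# tool end to end; it assumes the SpeechT5 checkpoints can be downloaded. ---
if __name__ == "__main__":
    tool = TextToSpeechTool()
    tool.setup()
    waveform = tool("Hello, world!")
    print(waveform.shape)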
"""simple docstring"""
def __lowercase ( snake_case_ : int ,snake_case_ : int ) ->int:
'''simple docstring'''
return int((input_a, input_a).count(0 ) == 0 )
def __lowercase ( ) ->None:
'''simple docstring'''
assert and_gate(0 ,0 ) == 0
assert and_gate(0 ,1 ) == 0
assert and_gate(1 ,0 ) == 0
assert and_gate(1 ,1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
"""simple docstring"""
from __future__ import annotations
class _UpperCAmelCase :
def __init__( self , lowercase_ ) -> List[Any]:
UpperCAmelCase = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.' )
if len(lowercase_ ) != 0:
UpperCAmelCase = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(lowercase_ ) != cols:
raise error
for value in row:
if not isinstance(lowercase_ , (int, float) ):
raise error
UpperCAmelCase = rows
else:
UpperCAmelCase = []
def a_ ( self ) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def a_ ( self ) -> int:
return len(self.rows )
@property
def a_ ( self ) -> int:
return len(self.rows[0] )
@property
def a_ ( self ) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
def a_ ( self ) -> bool:
return self.order[0] == self.order[1]
def a_ ( self ) -> Matrix:
UpperCAmelCase = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(lowercase_ )
def a_ ( self ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def a_ ( self ) -> bool:
return bool(self.determinant() )
def a_ ( self , lowercase_ , lowercase_ ) -> int:
UpperCAmelCase = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(lowercase_ ).determinant()
def a_ ( self , lowercase_ , lowercase_ ) -> int:
if (row + column) % 2 == 0:
return self.get_minor(lowercase_ , lowercase_ )
return -1 * self.get_minor(lowercase_ , lowercase_ )
def a_ ( self ) -> Matrix:
return Matrix(
[
[self.get_minor(lowercase_ , lowercase_ ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def a_ ( self ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def a_ ( self ) -> Matrix:
UpperCAmelCase = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(lowercase_ )
def a_ ( self ) -> Matrix:
UpperCAmelCase = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse' )
return self.adjugate() * (1 / determinant)
def __repr__( self ) -> str:
return str(self.rows )
def __str__( self ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(lowercase_ ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
def a_ ( self , lowercase_ , lowercase_ = None ) -> None:
UpperCAmelCase = TypeError('Row must be a list containing all ints and/or floats' )
if not isinstance(lowercase_ , lowercase_ ):
raise type_error
for value in row:
if not isinstance(lowercase_ , (int, float) ):
raise type_error
if len(lowercase_ ) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix' )
if position is None:
self.rows.append(lowercase_ )
else:
UpperCAmelCase = self.rows[0:position] + [row] + self.rows[position:]
def a_ ( self , lowercase_ , lowercase_ = None ) -> None:
UpperCAmelCase = TypeError(
'Column must be a list containing all ints and/or floats' )
if not isinstance(lowercase_ , lowercase_ ):
raise type_error
for value in column:
if not isinstance(lowercase_ , (int, float) ):
raise type_error
if len(lowercase_ ) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix' )
if position is None:
UpperCAmelCase = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
UpperCAmelCase = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self , lowercase_ ) -> bool:
if not isinstance(lowercase_ , lowercase_ ):
return NotImplemented
return self.rows == other.rows
def __ne__( self , lowercase_ ) -> bool:
return not self == other
def __neg__( self ) -> Matrix:
return self * -1
def __add__( self , lowercase_ ) -> Matrix:
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self , lowercase_ ) -> Matrix:
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self , lowercase_ ) -> Matrix:
if isinstance(lowercase_ , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(lowercase_ , lowercase_ ):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second' )
return Matrix(
[
[Matrix.dot_product(lowercase_ , lowercase_ ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix' )
def __pow__( self , lowercase_ ) -> Matrix:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError('A Matrix can only be raised to the power of an int' )
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power' )
UpperCAmelCase = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def a_ ( cls , lowercase_ , lowercase_ ) -> int:
return sum(row[i] * column[i] for i in range(len(lowercase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
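

# --- Hypothetical usage examples (not part of the original file) ---
if __name__ == "__main__":
    m = Matrix([[1, 2], [3, 4]])
    print(m.determinant())  # 1*4 - 2*3 = -2
    print((m * 2).rows)     # [[2, 4], [6, 8]]
    print((m ** 2).rows)    # [[7, 10], [15, 22]]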
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : str = (DDPMScheduler,)
def a_ ( self , **lowercase_ ) -> Union[str, Any]:
UpperCAmelCase = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**lowercase_ )
return config
def a_ ( self ) -> int:
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def a_ ( self ) -> Optional[int]:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def a_ ( self ) -> Dict:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def a_ ( self ) -> Union[str, Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowercase_ )
def a_ ( self ) -> Optional[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase_ )
def a_ ( self ) -> Tuple:
self.check_over_configs(thresholding=lowercase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , )
def a_ ( self ) -> Any:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def a_ ( self ) -> Union[str, Any]:
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=lowercase_ )
def a_ ( self ) -> Optional[Any]:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.0_2 ) ) < 1E-5
def a_ ( self ) -> str:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = len(lowercase_ )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
UpperCAmelCase = torch.manual_seed(0 )
for t in reversed(range(lowercase_ ) ):
# 1. predict noise residual
UpperCAmelCase = model(lowercase_ , lowercase_ )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCAmelCase = pred_prev_sample
UpperCAmelCase = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def a_ ( self ) -> Union[str, Any]:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(prediction_type='v_prediction' )
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = len(lowercase_ )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
UpperCAmelCase = torch.manual_seed(0 )
for t in reversed(range(lowercase_ ) ):
# 1. predict noise residual
UpperCAmelCase = model(lowercase_ , lowercase_ )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCAmelCase = pred_prev_sample
UpperCAmelCase = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def a_ ( self ) -> Optional[Any]:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=lowercase_ )
UpperCAmelCase = scheduler.timesteps
for i, timestep in enumerate(lowercase_ ):
if i == len(lowercase_ ) - 1:
UpperCAmelCase = -1
else:
UpperCAmelCase = timesteps[i + 1]
UpperCAmelCase = scheduler.previous_timestep(lowercase_ )
UpperCAmelCase = prev_t.item()
self.assertEqual(lowercase_ , lowercase_ )
def a_ ( self ) -> Optional[Any]:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(lowercase_ , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=lowercase_ )
def a_ ( self ) -> List[Any]:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = [1_0_0, 8_7, 5_0, 1, 0]
UpperCAmelCase = len(lowercase_ )
with self.assertRaises(lowercase_ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=lowercase_ , timesteps=lowercase_ )
def a_ ( self ) -> str:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowercase_ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=lowercase_ )
"""
Count the semiprimes (products of exactly two primes, not necessarily
distinct) below a given bound, using a sieve plus a two-pointer sweep.
"""
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below `max_number` using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the numbers below `max_number` that are the product of two primes."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
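

# --- Hypothetical worked example (not part of the original file): the
# semiprimes below 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25, 26 -- ten of them. ---
if __name__ == "__main__":
    assert solution(30) == 10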
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
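

# --- Hypothetical hardening sketch (not part of the original file): Yahoo's
# markup and CSS class names change often, so a safer variant guards both the
# HTTP call and the tag lookup instead of assuming they succeed. ---
from typing import Optional


def stock_price_safe(symbol: str = "AAPL") -> Optional[str]:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
    except requests.RequestException:
        return None
    soup = BeautifulSoup(response.text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    span = div.find("span") if div else None
    return span.text if span else None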
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
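

# --- Hypothetical usage sketch (not part of the original file): names such as
# `post_processing_function` are placeholders for the helpers that an
# accompanying question-answering training script would define. ---
# trainer = QuestionAnsweringTrainer(
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     eval_examples=raw_eval_examples,
#     post_process_function=post_processing_function,
#     compute_metrics=compute_metrics,
# )
# metrics = trainer.evaluate()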
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
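

# --- Hypothetical usage example (not part of the original file): configs are
# plain containers, so overriding a few fields is enough to sketch one. ---
if __name__ == "__main__":
    config = DebertaV2Config(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
    print(config.model_type)   # deberta-v2
    print(config.hidden_size)  # 768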
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    """
    Checks that each `complete_*` example script contains all of the
    information found in the `by_feature` scripts, line for line.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def snake_case_ ( A_ : Dict, A_ : bool = True, A_ : float = math.inf, A_ : float = -math.inf, A_ : float = math.inf, A_ : float = -math.inf, A_ : bool = False, A_ : float = 1_00, A_ : float = 0.01, A_ : float = 1, ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : str = search_prob
_lowerCamelCase : str = start_temperate
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : int = 0
_lowerCamelCase : Any = None
while not search_end:
_lowerCamelCase : Dict = current_state.score()
if best_state is None or current_score > best_state.score():
_lowerCamelCase : Tuple = current_state
scores.append(A_ )
iterations += 1
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Optional[int] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_lowerCamelCase : List[Any] = random.randint(0, len(A_ ) - 1 ) # picking a random neighbor
_lowerCamelCase : Dict = neighbors.pop(A_ )
_lowerCamelCase : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_lowerCamelCase : str = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_lowerCamelCase : Optional[Any] = picked_neighbor
else:
_lowerCamelCase : Optional[int] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_lowerCamelCase : Union[str, Any] = picked_neighbor
_lowerCamelCase : List[str] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_lowerCamelCase : Tuple = True
else:
_lowerCamelCase : Optional[Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(A_ ), A_ )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def snake_case_ ( A_ : int, A_ : Tuple ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def snake_case_ ( A_ : Optional[int], A_ : List[Any] ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F"""{local_min.score()}"""
)
lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F"""{local_min.score()}"""
)
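

# --- Hypothetical illustration (not part of the original file): the acceptance
# probability e^(change / T) for a move that worsens the score by 5 shrinks as
# the temperature decays, which is what makes the search settle down. ---
if __name__ == "__main__":
    for temp in (100, 10, 1):
        print(temp, math.e ** (-5 / temp))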
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowerCAmelCase__ = logging.getLogger(__name__)
def _lowerCAmelCase( __A=2 , __A=3 , __A=16 , __A = 10 , __A = 2 ):
def get_dataset(__A ):
UpperCAmelCase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__A , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
UpperCAmelCase = get_dataset(__A )
UpperCAmelCase = get_dataset(__A )
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
return (train_dataloader, valid_dataloader)
def _lowerCAmelCase( __A , __A , __A , __A , __A , __A=None ):
UpperCAmelCase = []
for epoch in range(__A ):
# Train quickly
model.train()
for batch in dataloader:
UpperCAmelCase , UpperCAmelCase = batch
UpperCAmelCase = model(__A )
UpperCAmelCase = torch.nn.functional.mse_loss(__A , __A )
accelerator.backward(__A )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class __magic_name__ ( nn.Module ):
def __init__( self : Union[str, Any] ) -> List[Any]:
super().__init__()
UpperCAmelCase = nn.Parameter(torch.randn(1 ) )
UpperCAmelCase = nn.Parameter(torch.randn(1 ) )
def _UpperCamelCase ( self : int , lowerCAmelCase__ : Optional[int] ) -> Union[str, Any]:
return x * self.a + self.b
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Any ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
UpperCAmelCase = ProjectConfiguration(total_limit=1 , project_dir=lowerCAmelCase__ , automatic_checkpoint_naming=lowerCAmelCase__ )
# Train baseline
UpperCAmelCase = Accelerator(project_config=lowerCAmelCase__ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def _UpperCamelCase ( self : Optional[Any] ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
# Train baseline
UpperCAmelCase = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
UpperCAmelCase = os.path.join(lowerCAmelCase__ , "initial" )
accelerator.save_state(lowerCAmelCase__ )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
UpperCAmelCase = train(3 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
UpperCAmelCase = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
accelerator.load_state(lowerCAmelCase__ )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = train(2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save everything
UpperCAmelCase = os.path.join(lowerCAmelCase__ , "checkpoint" )
accelerator.save_state(lowerCAmelCase__ )
# Load everything back in and make sure all states work
accelerator.load_state(lowerCAmelCase__ )
test_rands += train(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase__ )
# Train baseline
UpperCAmelCase = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
accelerator.save_state()
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
UpperCAmelCase = train(3 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
UpperCAmelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowerCAmelCase__ )
UpperCAmelCase = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
accelerator.load_state(os.path.join(lowerCAmelCase__ , "checkpoints" , "checkpoint_0" ) )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = train(2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCAmelCase__ , "checkpoints" , "checkpoint_1" ) )
test_rands += train(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; with total_limit=2 only the two most recent should survive
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
lowerCAmelCase__ = "/tmp/accelerate/state_checkpointing"
lowerCAmelCase__ = DummyModel()
lowerCAmelCase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowerCAmelCase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
lowerCAmelCase__, lowerCAmelCase__ = dummy_dataloaders()
lowerCAmelCase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowerCAmelCase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowerCAmelCase__ = group["params"][0].device
break
assert param_device.type == accelerator.device.type
lowerCAmelCase__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
lowerCAmelCase__ = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
lowerCAmelCase__ = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
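All of the tests above revolve around one save/resume pattern. The sketch below distills it with only a toy model; the `./ckpts` directory name is illustrative, while everything else is the public accelerate API exercised throughout this file.

import torch
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration, set_seed

set_seed(42)
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
accelerator = Accelerator(project_dir="./ckpts", project_config=project_config)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state()  # writes ./ckpts/checkpoints/checkpoint_0
# ... train for a while ...
accelerator.load_state("./ckpts/checkpoints/checkpoint_0")  # restores model, optimizer and RNG state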
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
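A quick sanity check of the config above (a sketch; it assumes a transformers version that ships ConvNextV2Config, and the printed values are simply the defaults defined in this file):

from transformers import ConvNextV2Config

config = ConvNextV2Config()
print(config.hidden_sizes)  # [96, 192, 384, 768]
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']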
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Newton-Laplace relation for the speed of sound in a fluid:
    v = sqrt(K / rho), with bulk modulus K in Pa and density rho in kg/m^3.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
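For a concrete number (illustrative textbook constants, not values from this file): water has a bulk modulus of roughly 2.15e9 Pa and a density of about 1000 kg/m^3, which gives a speed of sound near 1466 m/s.

water_speed = speed_of_sound_in_a_fluid(density=1_000, bulk_modulus=2.15e9)
print(f"{water_speed:.1f} m/s")  # ~1466.3 m/s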
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
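A pin table like this is typically consumed by a small helper in setup.py; the sketch below is one plausible shape for it (the `deps_list` name is an assumption for illustration, not something defined in this file):

def deps_list(*pkgs: str) -> list:
    """Look up the full requirement specifier for each package name."""
    return [deps[pkg] for pkg in pkgs]

install_requires = deps_list("numpy", "requests", "huggingface-hub")
# ['numpy', 'requests', 'huggingface-hub>=0.13.2']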
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device)
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size))

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for models that attach labels per-image
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long)
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float)
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
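Outside of a test harness, the same flow condenses to a few lines. This is a hedged sketch of standard transformers usage (the COCO fixture path is reused from the test above):

import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# target_sizes expects (height, width); PIL's image.size is (width, height)
results = processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
for score, label in zip(results["scores"], results["labels"]):
    print(f"{model.config.id2label[label.item()]}: {score:.3f}")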
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """
    Output of UNet1DModel.forward: the denoised sample, shaped
    (batch_size, num_channels, sample_size).
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift)
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn,
                out_dim=block_out_channels[0])

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel,
                out_channels=output_channel, temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block)
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1], embed_dim=block_out_channels[0],
            num_layers=layers_per_block, add_downsample=downsample_each_block)

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(up_block_types) - 1

            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel,
                out_channels=output_channel, temb_channels=block_out_channels[0],
                add_upsample=not is_final_block)
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0],
            out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4)
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
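A hedged sketch of driving this 1-D UNet with its dance-diffusion-style defaults. The `extra_in_channels=16` setting is an assumption needed so that the first no-skip block can absorb the 16-dim Fourier time embedding that gets concatenated onto the 2 input channels, and the sample length must be divisible by 4 because two of the down blocks halve it:

import torch
from diffusers import UNet1DModel

model = UNet1DModel(sample_size=4096, in_channels=2, out_channels=2, extra_in_channels=16)
sample = torch.randn(1, 2, 4096)           # (batch, channels, length)
out = model(sample, timestep=torch.tensor([10])).sample
print(out.shape)                            # torch.Size([1, 2, 4096])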